Diffstat (limited to 'drivers')
567 files changed, 80730 insertions, 21642 deletions
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 8cbfaa687d72..fe81c851ca88 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -2177,7 +2177,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms return; } - if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) { + if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) { retcode = ERR_PERM; goto fail; } diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 6dcd55a74c0a..5577ed656e2f 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -31,6 +31,30 @@ #define VERSION "1.0" +#define ATH3K_DNLOAD 0x01 +#define ATH3K_GETSTATE 0x05 +#define ATH3K_SET_NORMAL_MODE 0x07 +#define ATH3K_GETVERSION 0x09 +#define USB_REG_SWITCH_VID_PID 0x0a + +#define ATH3K_MODE_MASK 0x3F +#define ATH3K_NORMAL_MODE 0x0E + +#define ATH3K_PATCH_UPDATE 0x80 +#define ATH3K_SYSCFG_UPDATE 0x40 + +#define ATH3K_XTAL_FREQ_26M 0x00 +#define ATH3K_XTAL_FREQ_40M 0x01 +#define ATH3K_XTAL_FREQ_19P2 0x02 +#define ATH3K_NAME_LEN 0xFF + +struct ath3k_version { + unsigned int rom_version; + unsigned int build_version; + unsigned int ram_version; + unsigned char ref_clock; + unsigned char reserved[0x07]; +}; static struct usb_device_id ath3k_table[] = { /* Atheros AR3011 */ @@ -42,15 +66,31 @@ static struct usb_device_id ath3k_table[] = { /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, + /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0CF3, 0x3004) }, + /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, + { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ath3k_table); +#define BTUSB_ATH3012 0x80 +/* This table is to load patch and sysconfig files + * for AR3012 */ +static struct usb_device_id ath3k_blist_tbl[] = { + + /* Atheros AR3012 with sflash firmware*/ + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, + + { } /* Terminating entry */ +}; + #define USB_REQ_DFU_DNLOAD 1 #define BULK_SIZE 4096 +#define FW_HDR_SIZE 20 static int ath3k_load_firmware(struct usb_device *udev, const struct firmware *firmware) @@ -106,28 +146,265 @@ error: return err; } +static int ath3k_get_state(struct usb_device *udev, unsigned char *state) +{ + int pipe = 0; + + pipe = usb_rcvctrlpipe(udev, 0); + return usb_control_msg(udev, pipe, ATH3K_GETSTATE, + USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, + state, 0x01, USB_CTRL_SET_TIMEOUT); +} + +static int ath3k_get_version(struct usb_device *udev, + struct ath3k_version *version) +{ + int pipe = 0; + + pipe = usb_rcvctrlpipe(udev, 0); + return usb_control_msg(udev, pipe, ATH3K_GETVERSION, + USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, + sizeof(struct ath3k_version), + USB_CTRL_SET_TIMEOUT); +} + +static int ath3k_load_fwfile(struct usb_device *udev, + const struct firmware *firmware) +{ + u8 *send_buf; + int err, pipe, len, size, count, sent = 0; + int ret; + + count = firmware->size; + + send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC); + if (!send_buf) { + BT_ERR("Can't allocate memory chunk for firmware"); + return -ENOMEM; + } + + size = min_t(uint, count, FW_HDR_SIZE); + memcpy(send_buf, firmware->data, size); + + pipe = usb_sndctrlpipe(udev, 0); + ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD, + USB_TYPE_VENDOR, 0, 0, send_buf, + size, USB_CTRL_SET_TIMEOUT); + if (ret < 0) { + BT_ERR("Can't change to loading configuration err"); + kfree(send_buf); + return ret; + } + + sent += size; + count -= size; + + while (count) { + size = min_t(uint, count, BULK_SIZE); + pipe = 
usb_sndbulkpipe(udev, 0x02); + + memcpy(send_buf, firmware->data + sent, size); + + err = usb_bulk_msg(udev, pipe, send_buf, size, + &len, 3000); + if (err || (len != size)) { + BT_ERR("Error in firmware loading err = %d," + "len = %d, size = %d", err, len, size); + kfree(send_buf); + return err; + } + sent += size; + count -= size; + } + + kfree(send_buf); + return 0; +} + +static int ath3k_switch_pid(struct usb_device *udev) +{ + int pipe = 0; + + pipe = usb_sndctrlpipe(udev, 0); + return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID, + USB_TYPE_VENDOR, 0, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); +} + +static int ath3k_set_normal_mode(struct usb_device *udev) +{ + unsigned char fw_state; + int pipe = 0, ret; + + ret = ath3k_get_state(udev, &fw_state); + if (ret < 0) { + BT_ERR("Can't get state to change to normal mode err"); + return ret; + } + + if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) { + BT_DBG("firmware was already in normal mode"); + return 0; + } + + pipe = usb_sndctrlpipe(udev, 0); + return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE, + USB_TYPE_VENDOR, 0, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT); +} + +static int ath3k_load_patch(struct usb_device *udev) +{ + unsigned char fw_state; + char filename[ATH3K_NAME_LEN] = {0}; + const struct firmware *firmware; + struct ath3k_version fw_version, pt_version; + int ret; + + ret = ath3k_get_state(udev, &fw_state); + if (ret < 0) { + BT_ERR("Can't get state to change to load ram patch err"); + return ret; + } + + if (fw_state & ATH3K_PATCH_UPDATE) { + BT_DBG("Patch was already downloaded"); + return 0; + } + + ret = ath3k_get_version(udev, &fw_version); + if (ret < 0) { + BT_ERR("Can't get version to change to load ram patch err"); + return ret; + } + + snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu", + fw_version.rom_version); + + ret = request_firmware(&firmware, filename, &udev->dev); + if (ret < 0) { + BT_ERR("Patch file not found %s", filename); + return ret; + } + + pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8); + pt_version.build_version = *(int *) + (firmware->data + firmware->size - 4); + + if ((pt_version.rom_version != fw_version.rom_version) || + (pt_version.build_version <= fw_version.build_version)) { + BT_ERR("Patch file version did not match with firmware"); + release_firmware(firmware); + return -EINVAL; + } + + ret = ath3k_load_fwfile(udev, firmware); + release_firmware(firmware); + + return ret; +} + +static int ath3k_load_syscfg(struct usb_device *udev) +{ + unsigned char fw_state; + char filename[ATH3K_NAME_LEN] = {0}; + const struct firmware *firmware; + struct ath3k_version fw_version; + int clk_value, ret; + + ret = ath3k_get_state(udev, &fw_state); + if (ret < 0) { + BT_ERR("Can't get state to change to load configration err"); + return -EBUSY; + } + + ret = ath3k_get_version(udev, &fw_version); + if (ret < 0) { + BT_ERR("Can't get version to change to load ram patch err"); + return ret; + } + + switch (fw_version.ref_clock) { + + case ATH3K_XTAL_FREQ_26M: + clk_value = 26; + break; + case ATH3K_XTAL_FREQ_40M: + clk_value = 40; + break; + case ATH3K_XTAL_FREQ_19P2: + clk_value = 19; + break; + default: + clk_value = 0; + break; + } + + snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", + fw_version.rom_version, clk_value, ".dfu"); + + ret = request_firmware(&firmware, filename, &udev->dev); + if (ret < 0) { + BT_ERR("Configuration file not found %s", filename); + return ret; + } + + ret = ath3k_load_fwfile(udev, firmware); + release_firmware(firmware); 
+ + return ret; +} + static int ath3k_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); + int ret; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; - if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { - return -EIO; + /* match device ID in ath3k blacklist table */ + if (!id->driver_info) { + const struct usb_device_id *match; + match = usb_match_id(intf, ath3k_blist_tbl); + if (match) + id = match; } - if (ath3k_load_firmware(udev, firmware)) { - release_firmware(firmware); + /* load patch and sysconfig files for AR3012 */ + if (id->driver_info & BTUSB_ATH3012) { + ret = ath3k_load_patch(udev); + if (ret < 0) { + BT_ERR("Loading patch file failed"); + return ret; + } + ret = ath3k_load_syscfg(udev); + if (ret < 0) { + BT_ERR("Loading sysconfig file failed"); + return ret; + } + ret = ath3k_set_normal_mode(udev); + if (ret < 0) { + BT_ERR("Set normal mode failed"); + return ret; + } + ath3k_switch_pid(udev); + return 0; + } + + if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) { + BT_ERR("Error loading firmware"); return -EIO; } + + ret = ath3k_load_firmware(udev, firmware); release_firmware(firmware); - return 0; + return ret; } static void ath3k_disconnect(struct usb_interface *intf) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 700a3840fddc..7e0ebd4a1a74 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -105,6 +105,9 @@ static struct usb_device_id blacklist_table[] = { /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, + /* Atheros 3012 with sflash firmware */ + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_IGNORE }, + /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, @@ -714,15 +717,11 @@ static int btusb_send_frame(struct sk_buff *skb) pipe = usb_sndisocpipe(data->udev, data->isoc_tx_ep->bEndpointAddress); - urb->dev = data->udev; - urb->pipe = pipe; - urb->context = skb; - urb->complete = btusb_isoc_tx_complete; - urb->interval = data->isoc_tx_ep->bInterval; + usb_fill_int_urb(urb, data->udev, pipe, + skb->data, skb->len, btusb_isoc_tx_complete, + skb, data->isoc_tx_ep->bInterval); urb->transfer_flags = URB_ISO_ASAP; - urb->transfer_buffer = skb->data; - urb->transfer_buffer_length = skb->len; __fill_isoc_descriptor(urb, skb->len, le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 3c6cabcb7d84..48ad2a7ab080 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -398,6 +398,7 @@ static int hci_uart_register_dev(struct hci_uart *hu) hdev->flush = hci_uart_flush; hdev->send = hci_uart_send_frame; hdev->destruct = hci_uart_destruct; + hdev->parent = hu->tty->dev; hdev->owner = THIS_MODULE; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 8aba0ba57de5..e0ef5fdc361e 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -183,20 +183,15 @@ static int addr4_resolve(struct sockaddr_in *src_in, { __be32 src_ip = src_in->sin_addr.s_addr; __be32 dst_ip = dst_in->sin_addr.s_addr; - struct flowi fl; struct rtable *rt; struct neighbour *neigh; int ret; - memset(&fl, 0, sizeof fl); - fl.nl_u.ip4_u.daddr = dst_ip; - fl.nl_u.ip4_u.saddr = src_ip; - fl.oif = addr->bound_dev_if; - - ret = 
ip_route_output_key(&init_net, &rt, &fl); - if (ret) + rt = ip_route_output(&init_net, dst_ip, src_ip, 0, addr->bound_dev_if); + if (IS_ERR(rt)) { + ret = PTR_ERR(rt); goto out; - + } src_in->sin_family = AF_INET; src_in->sin_addr.s_addr = rt->rt_src; @@ -236,28 +231,28 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, struct sockaddr_in6 *dst_in, struct rdma_dev_addr *addr) { - struct flowi fl; + struct flowi6 fl6; struct neighbour *neigh; struct dst_entry *dst; int ret; - memset(&fl, 0, sizeof fl); - ipv6_addr_copy(&fl.fl6_dst, &dst_in->sin6_addr); - ipv6_addr_copy(&fl.fl6_src, &src_in->sin6_addr); - fl.oif = addr->bound_dev_if; + memset(&fl6, 0, sizeof fl6); + ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr); + ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr); + fl6.flowi6_oif = addr->bound_dev_if; - dst = ip6_route_output(&init_net, NULL, &fl); + dst = ip6_route_output(&init_net, NULL, &fl6); if ((ret = dst->error)) goto put; - if (ipv6_addr_any(&fl.fl6_src)) { + if (ipv6_addr_any(&fl6.saddr)) { ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev, - &fl.fl6_dst, 0, &fl.fl6_src); + &fl6.daddr, 0, &fl6.saddr); if (ret) goto put; src_in->sin6_family = AF_INET6; - ipv6_addr_copy(&src_in->sin6_addr, &fl.fl6_src); + ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr); } if (dst->dev->flags & IFF_LOOPBACK) { diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index d02dcc6e5963..3216bcad7e82 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -338,23 +338,11 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip, __be16 peer_port, u8 tos) { struct rtable *rt; - struct flowi fl = { - .oif = 0, - .nl_u = { - .ip4_u = { - .daddr = peer_ip, - .saddr = local_ip, - .tos = tos} - }, - .proto = IPPROTO_TCP, - .uli_u = { - .ports = { - .sport = local_port, - .dport = peer_port} - } - }; - - if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) + + rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip, + peer_port, local_port, IPPROTO_TCP, + tos, 0); + if (IS_ERR(rt)) return NULL; return rt; } diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b4d9e4caf3c9..9d8dcfab2b38 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -315,23 +315,11 @@ static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, __be16 peer_port, u8 tos) { struct rtable *rt; - struct flowi fl = { - .oif = 0, - .nl_u = { - .ip4_u = { - .daddr = peer_ip, - .saddr = local_ip, - .tos = tos} - }, - .proto = IPPROTO_TCP, - .uli_u = { - .ports = { - .sport = local_port, - .dport = peer_port} - } - }; - - if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) + + rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip, + peer_port, local_port, IPPROTO_TCP, + tos, 0); + if (IS_ERR(rt)) return NULL; return rt; } diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 3b4ec3238ceb..3d7f3664b67b 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -153,7 +153,8 @@ static int nes_inetaddr_event(struct notifier_block *notifier, nesdev, nesdev->netdev[0]->name); netdev = nesdev->netdev[0]; nesvnic = netdev_priv(netdev); - is_bonded = (netdev->master == event_netdev); + is_bonded = netif_is_bond_slave(netdev) && + (netdev->master == event_netdev); if ((netdev == event_netdev) || is_bonded) { if (nesvnic->rdma_enabled == 0) { nes_debug(NES_DBG_NETDEV, "Returning without processing 
event for %s since" diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 009ec814d517..ef3291551bc6 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1104,21 +1104,19 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core, static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpindex) { struct rtable *rt; - struct flowi fl; struct neighbour *neigh; int rc = arpindex; struct net_device *netdev; struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; - memset(&fl, 0, sizeof fl); - fl.nl_u.ip4_u.daddr = htonl(dst_ip); - if (ip_route_output_key(&init_net, &rt, &fl)) { + rt = ip_route_output(&init_net, htonl(dst_ip), 0, 0, 0); + if (IS_ERR(rt)) { printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n", __func__, dst_ip); return rc; } - if (nesvnic->netdev->master) + if (netif_is_bond_slave(netdev)) netdev = nesvnic->netdev->master; else netdev = nesvnic->netdev; diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 049eaf12aaab..1f23e048f077 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); - if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) + if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) return; spin_lock(&receiving_list_lock); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 03823327db25..dc280bc8eba2 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -238,8 +238,8 @@ source "drivers/net/arm/Kconfig" config AX88796 tristate "ASIX AX88796 NE2000 clone support" depends on ARM || MIPS || SUPERH - select CRC32 - select MII + select PHYLIB + select MDIO_BITBANG help AX88796 driver, using platform bus to provide chip detection and resources @@ -1498,7 +1498,7 @@ config FORCEDETH config CS89x0 tristate "CS89x0 support" depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \ - || ARCH_IXDP2X01 || MACH_MX31ADS) + || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440) ---help--- Support for CS89x0 chipset based Ethernet cards. If you have a network (Ethernet) card of this type, say Y and read the @@ -1512,7 +1512,7 @@ config CS89x0 config CS89x0_NONISA_IRQ def_bool y depends on CS89x0 != n - depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS + depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440 config TC35815 tristate "TOSHIBA TC35815 Ethernet support" @@ -1944,7 +1944,8 @@ config 68360_ENET config FEC bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ - MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28 + IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC + default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM select PHYLIB help Say Y here if you want to use the built-in 10/100 Fast ethernet @@ -2007,6 +2008,15 @@ config BCM63XX_ENET This driver supports the ethernet MACs in the Broadcom 63xx MIPS chipset family (BCM63XX). +config FTMAC100 + tristate "Faraday FTMAC100 10/100 Ethernet support" + depends on ARM + select MII + help + This driver supports the FTMAC100 10/100 Ethernet controller + from Faraday. It is used on Faraday A320, Andes AG101 and some + other ARM/NDS32 SoC's. 
+ source "drivers/net/fs_enet/Kconfig" source "drivers/net/octeon/Kconfig" @@ -2098,7 +2108,9 @@ config E1000 config E1000E tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" + select CRC32 depends on PCI && (!SPARC32 || BROKEN) + select CRC32 ---help--- This driver supports the PCI-Express Intel(R) PRO/1000 gigabit ethernet family of adapters. For PCI or PCI-X e1000 adapters, @@ -2235,15 +2247,6 @@ config R8169 To compile this driver as a module, choose M here: the module will be called r8169. This is recommended. -config R8169_VLAN - bool "VLAN support" - depends on R8169 && VLAN_8021Q - ---help--- - Say Y here for the r8169 driver to support the functions required - by the kernel 802.1Q code. - - If in doubt, say Y. - config SB1250_MAC tristate "SB1250 Gigabit Ethernet support" depends on SIBYTE_SB1xxx_SOC @@ -2594,14 +2597,9 @@ config CHELSIO_T1_1G Enables support for Chelsio's gigabit Ethernet PCI cards. If you are using only 10G cards say 'N' here. -config CHELSIO_T3_DEPENDS - tristate - depends on PCI && INET - default y - config CHELSIO_T3 tristate "Chelsio Communications T3 10Gb Ethernet support" - depends on CHELSIO_T3_DEPENDS + depends on PCI && INET select FW_LOADER select MDIO help @@ -2619,14 +2617,9 @@ config CHELSIO_T3 To compile this driver as a module, choose M here: the module will be called cxgb3. -config CHELSIO_T4_DEPENDS - tristate - depends on PCI && INET - default y - config CHELSIO_T4 tristate "Chelsio Communications T4 Ethernet support" - depends on CHELSIO_T4_DEPENDS + depends on PCI select FW_LOADER select MDIO help @@ -2644,14 +2637,9 @@ config CHELSIO_T4 To compile this driver as a module choose M here; the module will be called cxgb4. -config CHELSIO_T4VF_DEPENDS - tristate - depends on PCI && INET - default y - config CHELSIO_T4VF tristate "Chelsio Communications T4 Virtual Function Ethernet support" - depends on CHELSIO_T4VF_DEPENDS + depends on PCI help This driver supports Chelsio T4-based gigabit and 10Gb Ethernet adapters with PCI-E SR-IOV Virtual Functions. @@ -2966,12 +2954,38 @@ config XEN_NETDEV_FRONTEND select XEN_XENBUS_FRONTEND default y help - The network device frontend driver allows the kernel to - access network devices exported exported by a virtual - machine containing a physical network device driver. The - frontend driver is intended for unprivileged guest domains; - if you are compiling a kernel for a Xen guest, you almost - certainly want to enable this. + This driver provides support for Xen paravirtual network + devices exported by a Xen network driver domain (often + domain 0). + + The corresponding Linux backend driver is enabled by the + CONFIG_XEN_NETDEV_BACKEND option. + + If you are compiling a kernel for use as Xen guest, you + should say Y here. To compile this driver as a module, chose + M here: the module will be called xen-netfront. + +config XEN_NETDEV_BACKEND + tristate "Xen backend network device" + depends on XEN_BACKEND + help + This driver allows the kernel to act as a Xen network driver + domain which exports paravirtual network devices to other + Xen domains. These devices can be accessed by any operating + system that implements a compatible front end. + + The corresponding Linux frontend driver is enabled by the + CONFIG_XEN_NETDEV_FRONTEND configuration option. + + The backend driver presents a standard network device + endpoint for each paravirtual network device to the driver + domain network stack. These can then be bridged or routed + etc in order to provide full network connectivity. 
+ + If you are compiling a kernel to run in a Xen network driver + domain (often this is domain 0) you should say Y here. To + compile this driver as a module, chose M here: the module + will be called xen-netback. config ISERIES_VETH tristate "iSeries Virtual Ethernet driver support" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index b90738d13994..01b604ad155e 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -147,6 +147,7 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o obj-$(CONFIG_AX88796) += ax88796.o obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o +obj-$(CONFIG_FTMAC100) += ftmac100.o obj-$(CONFIG_TSI108_ETH) += tsi108_eth.o obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o @@ -171,6 +172,7 @@ obj-$(CONFIG_SLIP) += slip.o obj-$(CONFIG_SLHC) += slhc.o obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o +obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/ obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_IFB) += ifb.o diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c index 1bf672009948..23f2ab0f2fa8 100644 --- a/drivers/net/atl1c/atl1c_hw.c +++ b/drivers/net/atl1c/atl1c_hw.c @@ -345,7 +345,7 @@ int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) */ static int atl1c_phy_setup_adv(struct atl1c_hw *hw) { - u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK; + u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL; u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP & ~GIGA_CR_1000T_SPEED_MASK; @@ -373,7 +373,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw) } if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 || - atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0) + atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0) return -1; return 0; } @@ -517,19 +517,18 @@ int atl1c_phy_init(struct atl1c_hw *hw) "Error Setting up Auto-Negotiation\n"); return ret_val; } - mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; + mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; break; case MEDIA_TYPE_100M_FULL: - mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX; + mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX; break; case MEDIA_TYPE_100M_HALF: - mii_bmcr_data |= BMCR_SPEED_100; + mii_bmcr_data |= BMCR_SPEED100; break; case MEDIA_TYPE_10M_FULL: - mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX; + mii_bmcr_data |= BMCR_FULLDPLX; break; case MEDIA_TYPE_10M_HALF: - mii_bmcr_data |= BMCR_SPEED_10; break; default: if (netif_msg_link(adapter)) @@ -657,7 +656,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw) err = atl1c_phy_setup_adv(hw); if (err) return err; - mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG; + mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); } diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h index 3dd675979aa1..655fc6c4a8a4 100644 --- a/drivers/net/atl1c/atl1c_hw.h +++ b/drivers/net/atl1c/atl1c_hw.h @@ -736,55 +736,16 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw); #define REG_DEBUG_DATA0 0x1900 #define REG_DEBUG_DATA1 0x1904 -/* PHY Control Register */ -#define MII_BMCR 0x00 -#define BMCR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define BMCR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ -#define BMCR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define BMCR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define BMCR_ISOLATE 0x0400 /* Isolate PHY from MII */ -#define BMCR_POWER_DOWN 0x0800 /* Power down 
*/ -#define BMCR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define BMCR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define BMCR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define BMCR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define BMCR_SPEED_MASK 0x2040 -#define BMCR_SPEED_1000 0x0040 -#define BMCR_SPEED_100 0x2000 -#define BMCR_SPEED_10 0x0000 - -/* PHY Status Register */ -#define MII_BMSR 0x01 -#define BMMSR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -#define BMSR_JABBER_DETECT 0x0002 /* Jabber Detected */ -#define BMSR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define BMSR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ -#define BMSR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -#define BMSR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -#define BMSR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -#define BMSR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ -#define BMSR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -#define BMSR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -#define BMSR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -#define BMSR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -#define BMSR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -#define BMMII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -#define BMMII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ - -#define MII_PHYSID1 0x02 -#define MII_PHYSID2 0x03 #define L1D_MPW_PHYID1 0xD01C /* V7 */ #define L1D_MPW_PHYID2 0xD01D /* V1-V6 */ #define L1D_MPW_PHYID3 0xD01E /* V8 */ /* Autoneg Advertisement Register */ -#define MII_ADVERTISE 0x04 -#define ADVERTISE_SPEED_MASK 0x01E0 -#define ADVERTISE_DEFAULT_CAP 0x0DE0 +#define ADVERTISE_DEFAULT_CAP \ + (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM) /* 1000BASE-T Control Register */ -#define MII_GIGA_CR 0x09 #define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */ #define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index 3824382faecc..7d9d5067a65c 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -1102,10 +1102,10 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter) AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data); max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) & DEVICE_CTRL_MAX_PAYLOAD_MASK; - hw->dmaw_block = min(max_pay_load, hw->dmaw_block); + hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block); max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) & DEVICE_CTRL_MAX_RREQ_SZ_MASK; - hw->dmar_block = min(max_pay_load, hw->dmar_block); + hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) << TXQ_NUM_TPD_BURST_SHIFT; @@ -2718,7 +2718,6 @@ static int __devinit atl1c_probe(struct pci_dev *pdev, goto err_reset; } - device_init_wakeup(&pdev->dev, 1); /* reset the controller to * put the device in a known good starting state */ err = atl1c_phy_init(&adapter->hw); diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c index 6943a6c3b948..1209297433b8 100644 --- a/drivers/net/atl1e/atl1e_ethtool.c +++ b/drivers/net/atl1e/atl1e_ethtool.c @@ -95,18 +95,18 @@ static int atl1e_set_settings(struct net_device *netdev, ecmd->advertising = hw->autoneg_advertised | ADVERTISED_TP | ADVERTISED_Autoneg; - adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK; + adv4 
= hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL; adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK; if (hw->autoneg_advertised & ADVERTISE_10_HALF) - adv4 |= MII_AR_10T_HD_CAPS; + adv4 |= ADVERTISE_10HALF; if (hw->autoneg_advertised & ADVERTISE_10_FULL) - adv4 |= MII_AR_10T_FD_CAPS; + adv4 |= ADVERTISE_10FULL; if (hw->autoneg_advertised & ADVERTISE_100_HALF) - adv4 |= MII_AR_100TX_HD_CAPS; + adv4 |= ADVERTISE_100HALF; if (hw->autoneg_advertised & ADVERTISE_100_FULL) - adv4 |= MII_AR_100TX_FD_CAPS; + adv4 |= ADVERTISE_100FULL; if (hw->autoneg_advertised & ADVERTISE_1000_FULL) - adv9 |= MII_AT001_CR_1000T_FD_CAPS; + adv9 |= ADVERTISE_1000FULL; if (adv4 != hw->mii_autoneg_adv_reg || adv9 != hw->mii_1000t_ctrl_reg) { diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c index 76cc043def8c..923063d2e5bb 100644 --- a/drivers/net/atl1e/atl1e_hw.c +++ b/drivers/net/atl1e/atl1e_hw.c @@ -318,7 +318,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T control Register (Address 9). */ - mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; + mii_autoneg_adv_reg &= ~ADVERTISE_ALL; mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; /* @@ -327,44 +327,37 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) */ switch (hw->media_type) { case MEDIA_TYPE_AUTO_SENSOR: - mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | - MII_AR_10T_FD_CAPS | - MII_AR_100TX_HD_CAPS | - MII_AR_100TX_FD_CAPS); - hw->autoneg_advertised = ADVERTISE_10_HALF | - ADVERTISE_10_FULL | - ADVERTISE_100_HALF | - ADVERTISE_100_FULL; + mii_autoneg_adv_reg |= ADVERTISE_ALL; + hw->autoneg_advertised = ADVERTISE_ALL; if (hw->nic_type == athr_l1e) { - mii_1000t_ctrl_reg |= - MII_AT001_CR_1000T_FD_CAPS; + mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; hw->autoneg_advertised |= ADVERTISE_1000_FULL; } break; case MEDIA_TYPE_100M_FULL: - mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; + mii_autoneg_adv_reg |= ADVERTISE_100FULL; hw->autoneg_advertised = ADVERTISE_100_FULL; break; case MEDIA_TYPE_100M_HALF: - mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; + mii_autoneg_adv_reg |= ADVERTISE_100_HALF; hw->autoneg_advertised = ADVERTISE_100_HALF; break; case MEDIA_TYPE_10M_FULL: - mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; + mii_autoneg_adv_reg |= ADVERTISE_10_FULL; hw->autoneg_advertised = ADVERTISE_10_FULL; break; default: - mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; + mii_autoneg_adv_reg |= ADVERTISE_10_HALF; hw->autoneg_advertised = ADVERTISE_10_HALF; break; } /* flow control fixed to enable all */ - mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); + mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; @@ -374,7 +367,7 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) return ret_val; if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { - ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR, + ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000, mii_1000t_ctrl_reg); if (ret_val) return ret_val; @@ -397,7 +390,7 @@ int atl1e_phy_commit(struct atl1e_hw *hw) int ret_val; u16 phy_data; - phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; + phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data); if (ret_val) { @@ -645,15 +638,14 @@ int atl1e_restart_autoneg(struct atl1e_hw *hw) return err; if (hw->nic_type == athr_l1e || hw->nic_type == 
athr_l2e_revA) { - err = atl1e_write_phy_reg(hw, MII_AT001_CR, + err = atl1e_write_phy_reg(hw, MII_CTRL1000, hw->mii_1000t_ctrl_reg); if (err) return err; } err = atl1e_write_phy_reg(hw, MII_BMCR, - MII_CR_RESET | MII_CR_AUTO_NEG_EN | - MII_CR_RESTART_AUTO_NEG); + BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); return err; } diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h index 5ea2f4d86cfa..74df16aef793 100644 --- a/drivers/net/atl1e/atl1e_hw.h +++ b/drivers/net/atl1e/atl1e_hw.h @@ -629,127 +629,24 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw); /***************************** MII definition ***************************************/ /* PHY Common Register */ -#define MII_BMCR 0x00 -#define MII_BMSR 0x01 -#define MII_PHYSID1 0x02 -#define MII_PHYSID2 0x03 -#define MII_ADVERTISE 0x04 -#define MII_LPA 0x05 -#define MII_EXPANSION 0x06 -#define MII_AT001_CR 0x09 -#define MII_AT001_SR 0x0A -#define MII_AT001_ESR 0x0F #define MII_AT001_PSCR 0x10 #define MII_AT001_PSSR 0x11 #define MII_INT_CTRL 0x12 #define MII_INT_STATUS 0x13 #define MII_SMARTSPEED 0x14 -#define MII_RERRCOUNTER 0x15 -#define MII_SREVISION 0x16 -#define MII_RESV1 0x17 #define MII_LBRERROR 0x18 -#define MII_PHYADDR 0x19 #define MII_RESV2 0x1a -#define MII_TPISTATUS 0x1b -#define MII_NCONFIG 0x1c #define MII_DBG_ADDR 0x1D #define MII_DBG_DATA 0x1E - -/* PHY Control Register */ -#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ -#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ -#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ -#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ -#define MII_CR_POWER_DOWN 0x0800 /* Power down */ -#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ -#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ -#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ -#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ -#define MII_CR_SPEED_MASK 0x2040 -#define MII_CR_SPEED_1000 0x0040 -#define MII_CR_SPEED_100 0x2000 -#define MII_CR_SPEED_10 0x0000 - - -/* PHY Status Register */ -#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ -#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ -#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ -#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ -#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ -#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ -#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ -#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ -#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ -#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ -#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ -#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ -#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ -#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ -#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ - -/* Link partner ability register. 
*/ -#define MII_LPA_SLCT 0x001f /* Same as advertise selector */ -#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ -#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ -#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ -#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ -#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */ -#define MII_LPA_PAUSE 0x0400 /* PAUSE */ -#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */ -#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */ -#define MII_LPA_LPACK 0x4000 /* Link partner acked us */ -#define MII_LPA_NPAGE 0x8000 /* Next page bit */ - /* Autoneg Advertisement Register */ -#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ -#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ -#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ -#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ -#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ -#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ -#define MII_AR_PAUSE 0x0400 /* Pause operation desired */ -#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ -#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ -#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ -#define MII_AR_SPEED_MASK 0x01E0 -#define MII_AR_DEFAULT_CAP_MASK 0x0DE0 +#define MII_AR_DEFAULT_CAP_MASK 0 /* 1000BASE-T Control Register */ -#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ -#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ -#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ -/* 0=DTE device */ -#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ -/* 0=Configure PHY as Slave */ -#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ -/* 0=Automatic Master/Slave config */ -#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ -#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ -#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ -#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ -#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ -#define MII_AT001_CR_1000T_SPEED_MASK 0x0300 -#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300 - -/* 1000BASE-T Status Register */ -#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ -#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ -#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ -#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ -#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ -#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ -#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12 -#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13 - -/* Extended Status Register */ -#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ -#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ -#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ -#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ +#define MII_AT001_CR_1000T_SPEED_MASK \ + (ADVERTISE_1000FULL | ADVERTISE_1000HALF) +#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK /* AT001 PHY Specific 
Control Register */ #define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index e28f8baf394e..1ff001a8270c 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -547,8 +547,8 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter) hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); @@ -932,11 +932,11 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & DEVICE_CTRL_MAX_PAYLOAD_MASK; - hw->dmaw_block = min(max_pay_load, hw->dmaw_block); + hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block); max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & DEVICE_CTRL_MAX_RREQ_SZ_MASK; - hw->dmar_block = min(max_pay_load, hw->dmar_block); + hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); if (hw->nic_type != athr_l2e_revB) AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, @@ -2051,9 +2051,9 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state) atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data); - mii_advertise_data = MII_AR_10T_HD_CAPS; + mii_advertise_data = ADVERTISE_10HALF; - if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) || + if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) || (atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_advertise_data) != 0) || (atl1e_phy_commit(hw)) != 0) { diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 3b527687c28f..67f40b9c16ed 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -950,6 +950,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter) hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->wol = 0; + device_set_wakeup_enable(&adapter->pdev->dev, false); adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; adapter->ict = 50000; /* 100ms */ adapter->link_speed = SPEED_0; /* hardware init */ @@ -2735,15 +2736,15 @@ static int atl1_close(struct net_device *netdev) } #ifdef CONFIG_PM -static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) +static int atl1_suspend(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; u32 ctrl = 0; u32 wufc = adapter->wol; u32 val; - int retval; u16 speed; u16 duplex; @@ -2751,17 +2752,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) if (netif_running(netdev)) atl1_down(adapter); - retval = pci_save_state(pdev); - if (retval) - return retval; - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); val = ctrl & BMSR_LSTATUS; if (val) wufc &= ~ATLX_WUFC_LNKC; + if (!wufc) + goto disable_wol; - if (val && wufc) { + if (val) { val = atl1_get_speed_and_duplex(hw, &speed, &duplex); if (val) { if (netif_msg_ifdown(adapter)) @@ -2798,23 +2797,18 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); ioread32(hw->hw_addr + REG_PCIE_PHYMISC); - - pci_enable_wake(pdev, 
pci_choose_state(pdev, state), 1); - goto exit; - } - - if (!val && wufc) { + } else { ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); iowrite32(0, hw->hw_addr + REG_MAC_CTRL); ioread32(hw->hw_addr + REG_MAC_CTRL); hw->phy_configured = false; - pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); - goto exit; } -disable_wol: + return 0; + + disable_wol: iowrite32(0, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); @@ -2822,37 +2816,17 @@ disable_wol: iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); ioread32(hw->hw_addr + REG_PCIE_PHYMISC); hw->phy_configured = false; - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); -exit: - if (netif_running(netdev)) - pci_disable_msi(adapter->pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } -static int atl1_resume(struct pci_dev *pdev) +static int atl1_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); - u32 err; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - err = pci_enable_device(pdev); - if (err) { - if (netif_msg_ifup(adapter)) - dev_printk(KERN_DEBUG, &pdev->dev, - "error enabling pci device\n"); - return err; - } - - pci_set_master(pdev); iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); atl1_reset_hw(&adapter->hw); @@ -2864,16 +2838,25 @@ static int atl1_resume(struct pci_dev *pdev) return 0; } + +static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume); +#define ATL1_PM_OPS (&atl1_pm_ops) + #else -#define atl1_suspend NULL -#define atl1_resume NULL + +static int atl1_suspend(struct device *dev) { return 0; } + +#define ATL1_PM_OPS NULL #endif static void atl1_shutdown(struct pci_dev *pdev) { -#ifdef CONFIG_PM - atl1_suspend(pdev, PMSG_SUSPEND); -#endif + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1_adapter *adapter = netdev_priv(netdev); + + atl1_suspend(&pdev->dev); + pci_wake_from_d3(pdev, adapter->wol); + pci_set_power_state(pdev, PCI_D3hot); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -3117,9 +3100,8 @@ static struct pci_driver atl1_driver = { .id_table = atl1_pci_tbl, .probe = atl1_probe, .remove = __devexit_p(atl1_remove), - .suspend = atl1_suspend, - .resume = atl1_resume, - .shutdown = atl1_shutdown + .shutdown = atl1_shutdown, + .driver.pm = ATL1_PM_OPS, }; /* @@ -3409,6 +3391,9 @@ static int atl1_set_wol(struct net_device *netdev, adapter->wol = 0; if (wol->wolopts & WAKE_MAGIC) adapter->wol |= ATLX_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + return 0; } diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c index 4e6f4e95a5a0..e637e9f28fd4 100644 --- a/drivers/net/atlx/atl2.c +++ b/drivers/net/atlx/atl2.c @@ -93,8 +93,8 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter) hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); adapter->wol = 0; diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 4bebff3faeab..e7cb8c8b9776 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c 
@@ -9,7 +9,7 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. -*/ + */ #include <linux/module.h> #include <linux/kernel.h> @@ -17,46 +17,45 @@ #include <linux/isapnp.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> -#include <linux/mii.h> +#include <linux/mdio-bitbang.h> +#include <linux/phy.h> #include <linux/eeprom_93cx6.h> #include <linux/slab.h> #include <net/ax88796.h> #include <asm/system.h> -#include <asm/io.h> - -static int phy_debug = 0; /* Rename the lib8390.c functions to show that they are in this driver */ -#define __ei_open ax_ei_open -#define __ei_close ax_ei_close -#define __ei_poll ax_ei_poll +#define __ei_open ax_ei_open +#define __ei_close ax_ei_close +#define __ei_poll ax_ei_poll #define __ei_start_xmit ax_ei_start_xmit #define __ei_tx_timeout ax_ei_tx_timeout -#define __ei_get_stats ax_ei_get_stats +#define __ei_get_stats ax_ei_get_stats #define __ei_set_multicast_list ax_ei_set_multicast_list -#define __ei_interrupt ax_ei_interrupt +#define __ei_interrupt ax_ei_interrupt #define ____alloc_ei_netdev ax__alloc_ei_netdev -#define __NS8390_init ax_NS8390_init +#define __NS8390_init ax_NS8390_init /* force unsigned long back to 'void __iomem *' */ #define ax_convert_addr(_a) ((void __force __iomem *)(_a)) -#define ei_inb(_a) readb(ax_convert_addr(_a)) +#define ei_inb(_a) readb(ax_convert_addr(_a)) #define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a)) -#define ei_inb_p(_a) ei_inb(_a) +#define ei_inb_p(_a) ei_inb(_a) #define ei_outb_p(_v, _a) ei_outb(_v, _a) /* define EI_SHIFT() to take into account our register offsets */ -#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) +#define EI_SHIFT(x) (ei_local->reg_offset[(x)]) /* Ensure we have our RCR base value */ #define AX88796_PLATFORM @@ -74,43 +73,46 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron #define NE_DATAPORT EI_SHIFT(0x10) #define NE1SM_START_PG 0x20 /* First page of TX buffer */ -#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ +#define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ +#define AX_GPOC_PPDSET BIT(6) + /* device private data */ struct ax_device { - struct timer_list mii_timer; - spinlock_t mii_lock; - struct mii_if_info mii; - - u32 msg_enable; - void __iomem *map2; - struct platform_device *dev; - struct resource *mem; - struct resource *mem2; - struct ax_plat_data *plat; - - unsigned char running; - unsigned char resume_open; - unsigned int irqflags; - - u32 reg_offsets[0x20]; + struct mii_bus *mii_bus; + struct mdiobb_ctrl bb_ctrl; + struct phy_device *phy_dev; + void __iomem *addr_memr; + u8 reg_memr; + int link; + int speed; + int duplex; + + void __iomem *map2; + const struct ax_plat_data *plat; + + unsigned char running; + unsigned char resume_open; + unsigned int irqflags; + + u32 reg_offsets[0x20]; }; static inline struct ax_device *to_ax_dev(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); - return (struct ax_device *)(ei_local+1); + return (struct ax_device *)(ei_local + 1); } -/* ax_initial_check +/* + * ax_initial_check * * do an initial probe for the card to check wether it 
exists * and is functional */ - static int ax_initial_check(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); @@ -122,10 +124,10 @@ static int ax_initial_check(struct net_device *dev) if (reg0 == 0xFF) return -ENODEV; - ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); regd = ei_inb(ioaddr + 0x0d); ei_outb(0xff, ioaddr + 0x0d); - ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); + ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD); ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ if (ei_inb(ioaddr + EN0_COUNTER0) != 0) { ei_outb(reg0, ioaddr); @@ -136,29 +138,28 @@ static int ax_initial_check(struct net_device *dev) return 0; } -/* Hard reset the card. This used to pause for the same period that a - 8390 reset command required, but that shouldn't be necessary. */ - +/* + * Hard reset the card. This used to pause for the same period that a + * 8390 reset command required, but that shouldn't be necessary. + */ static void ax_reset_8390(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); unsigned long reset_start_time = jiffies; void __iomem *addr = (void __iomem *)dev->base_addr; if (ei_debug > 1) - dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies); + netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies); ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); - ei_status.txing = 0; - ei_status.dmaing = 0; + ei_local->txing = 0; + ei_local->dmaing = 0; /* This check _should_not_ be necessary, omit eventually. */ while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { - if (jiffies - reset_start_time > 2*HZ/100) { - dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", - __func__, dev->name); + if (jiffies - reset_start_time > 2 * HZ / 100) { + netdev_warn(dev, "%s: did not complete.\n", __func__); break; } } @@ -171,70 +172,72 @@ static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); void __iomem *nic_base = ei_local->mem; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in %s " "[DMAstat:%d][irqlock:%d].\n", - dev->name, __func__, - ei_status.dmaing, ei_status.irqlock); + __func__, + ei_local->dmaing, ei_local->irqlock); return; } - ei_status.dmaing |= 0x01; - ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + ei_local->dmaing |= 0x01; + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); ei_outb(0, nic_base + EN0_RCNTHI); ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ ei_outb(ring_page, nic_base + EN0_RSARHI); ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) - readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); + if (ei_local->word16) + readsw(nic_base + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr) >> 1); else - readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); + readsb(nic_base + NE_DATAPORT, hdr, + sizeof(struct e8390_pkt_hdr)); ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. 
*/ - ei_status.dmaing &= ~0x01; + ei_local->dmaing &= ~0x01; le16_to_cpus(&hdr->count); } -/* Block input and output, similar to the Crynwr packet driver. If you - are porting to a new ethercard, look at the packet driver source for hints. - The NEx000 doesn't share the on-board packet memory -- you have to put - the packet out through the "remote DMA" dataport using ei_outb. */ - +/* + * Block input and output, similar to the Crynwr packet driver. If + * you are porting to a new ethercard, look at the packet driver + * source for hints. The NEx000 doesn't share the on-board packet + * memory -- you have to put the packet out through the "remote DMA" + * dataport using ei_outb. + */ static void ax_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); void __iomem *nic_base = ei_local->mem; char *buf = skb->data; - if (ei_status.dmaing) { - dev_err(&ax->dev->dev, - "%s: DMAing conflict in %s " + if (ei_local->dmaing) { + netdev_err(dev, + "DMAing conflict in %s " "[DMAstat:%d][irqlock:%d].\n", - dev->name, __func__, - ei_status.dmaing, ei_status.irqlock); + __func__, + ei_local->dmaing, ei_local->irqlock); return; } - ei_status.dmaing |= 0x01; + ei_local->dmaing |= 0x01; - ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD); + ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD); ei_outb(count & 0xff, nic_base + EN0_RCNTLO); ei_outb(count >> 8, nic_base + EN0_RCNTHI); ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { + if (ei_local->word16) { readsw(nic_base + NE_DATAPORT, buf, count >> 1); if (count & 0x01) buf[count-1] = ei_inb(nic_base + NE_DATAPORT); @@ -243,34 +246,34 @@ static void ax_block_input(struct net_device *dev, int count, readsb(nic_base + NE_DATAPORT, buf, count); } - ei_status.dmaing &= ~1; + ei_local->dmaing &= ~1; } static void ax_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page) { struct ei_device *ei_local = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); void __iomem *nic_base = ei_local->mem; unsigned long dma_start; - /* Round the count up for word writes. Do we need to do this? - What effect will an odd byte count have on the 8390? - I should check someday. */ - - if (ei_status.word16 && (count & 0x01)) + /* + * Round the count up for word writes. Do we need to do this? + * What effect will an odd byte count have on the 8390? I + * should check someday. + */ + if (ei_local->word16 && (count & 0x01)) count++; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ - if (ei_status.dmaing) { - dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." + if (ei_local->dmaing) { + netdev_err(dev, "DMAing conflict in %s." "[DMAstat:%d][irqlock:%d]\n", - dev->name, __func__, - ei_status.dmaing, ei_status.irqlock); + __func__, + ei_local->dmaing, ei_local->irqlock); return; } - ei_status.dmaing |= 0x01; + ei_local->dmaing |= 0x01; /* We should already be in page 0, but to be safe... */ ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); @@ -278,250 +281,170 @@ static void ax_block_output(struct net_device *dev, int count, /* Now the normal output. 
*/ ei_outb(count & 0xff, nic_base + EN0_RCNTLO); - ei_outb(count >> 8, nic_base + EN0_RCNTHI); + ei_outb(count >> 8, nic_base + EN0_RCNTHI); ei_outb(0x00, nic_base + EN0_RSARLO); ei_outb(start_page, nic_base + EN0_RSARHI); ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); - if (ei_status.word16) { - writesw(nic_base + NE_DATAPORT, buf, count>>1); - } else { + if (ei_local->word16) + writesw(nic_base + NE_DATAPORT, buf, count >> 1); + else writesb(nic_base + NE_DATAPORT, buf, count); - } dma_start = jiffies; while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { - if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ - dev_warn(&ax->dev->dev, - "%s: timeout waiting for Tx RDC.\n", dev->name); + if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ + netdev_warn(dev, "timeout waiting for Tx RDC.\n"); ax_reset_8390(dev); - ax_NS8390_init(dev,1); + ax_NS8390_init(dev, 1); break; } } ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ - ei_status.dmaing &= ~0x01; + ei_local->dmaing &= ~0x01; } /* definitions for accessing MII/EEPROM interface */ #define AX_MEMR EI_SHIFT(0x14) -#define AX_MEMR_MDC (1<<0) -#define AX_MEMR_MDIR (1<<1) -#define AX_MEMR_MDI (1<<2) -#define AX_MEMR_MDO (1<<3) -#define AX_MEMR_EECS (1<<4) -#define AX_MEMR_EEI (1<<5) -#define AX_MEMR_EEO (1<<6) -#define AX_MEMR_EECLK (1<<7) - -/* ax_mii_ei_outbits - * - * write the specified set of bits to the phy -*/ - -static void -ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len) +#define AX_MEMR_MDC BIT(0) +#define AX_MEMR_MDIR BIT(1) +#define AX_MEMR_MDI BIT(2) +#define AX_MEMR_MDO BIT(3) +#define AX_MEMR_EECS BIT(4) +#define AX_MEMR_EEI BIT(5) +#define AX_MEMR_EEO BIT(6) +#define AX_MEMR_EECLK BIT(7) + +static void ax_handle_link_change(struct net_device *dev) { - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; - unsigned int memr; - - /* clock low, data to output mode */ - memr = ei_inb(memr_addr); - memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR); - ei_outb(memr, memr_addr); - - for (len--; len >= 0; len--) { - if (bits & (1 << len)) - memr |= AX_MEMR_MDO; - else - memr &= ~AX_MEMR_MDO; - - ei_outb(memr, memr_addr); - - /* clock high */ + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = ax->phy_dev; + int status_change = 0; - ei_outb(memr | AX_MEMR_MDC, memr_addr); - udelay(1); + if (phy_dev->link && ((ax->speed != phy_dev->speed) || + (ax->duplex != phy_dev->duplex))) { - /* clock low */ - ei_outb(memr, memr_addr); + ax->speed = phy_dev->speed; + ax->duplex = phy_dev->duplex; + status_change = 1; } - /* leaves the clock line low, mdir input */ - memr |= AX_MEMR_MDIR; - ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR); -} - -/* ax_phy_ei_inbits - * - * read a specified number of bits from the phy -*/ - -static unsigned int -ax_phy_ei_inbits(struct net_device *dev, int no) -{ - struct ei_device *ei_local = netdev_priv(dev); - void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; - unsigned int memr; - unsigned int result = 0; - - /* clock low, data to input mode */ - memr = ei_inb(memr_addr); - memr &= ~AX_MEMR_MDC; - memr |= AX_MEMR_MDIR; - ei_outb(memr, memr_addr); - - for (no--; no >= 0; no--) { - ei_outb(memr | AX_MEMR_MDC, memr_addr); - - udelay(1); - - if (ei_inb(memr_addr) & AX_MEMR_MDI) - result |= (1<<no); + if (phy_dev->link != ax->link) { + if (!phy_dev->link) { + ax->speed = 0; + ax->duplex = -1; + } + ax->link = phy_dev->link; - ei_outb(memr, memr_addr); + status_change = 1; } - return result; 
-} - -/* ax_phy_issueaddr - * - * use the low level bit shifting routines to send the address - * and command to the specified phy -*/ - -static void -ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc) -{ - if (phy_debug) - pr_debug("%s: dev %p, %04x, %04x, %d\n", - __func__, dev, phy_addr, reg, opc); - - ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */ - ax_mii_ei_outbits(dev, 1, 2); /* frame-start */ - ax_mii_ei_outbits(dev, opc, 2); /* op code */ - ax_mii_ei_outbits(dev, phy_addr, 5); /* phy address */ - ax_mii_ei_outbits(dev, reg, 5); /* reg address */ + if (status_change) + phy_print_status(phy_dev); } -static int -ax_phy_read(struct net_device *dev, int phy_addr, int reg) +static int ax_mii_probe(struct net_device *dev) { - struct ei_device *ei_local = netdev_priv(dev); - unsigned long flags; - unsigned int result; + struct ax_device *ax = to_ax_dev(dev); + struct phy_device *phy_dev = NULL; + int ret; - spin_lock_irqsave(&ei_local->page_lock, flags); + /* find the first phy */ + phy_dev = phy_find_first(ax->mii_bus); + if (!phy_dev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } - ax_phy_issueaddr(dev, phy_addr, reg, 2); + ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0, + PHY_INTERFACE_MODE_MII); + if (ret) { + netdev_err(dev, "Could not attach to PHY\n"); + return ret; + } - result = ax_phy_ei_inbits(dev, 17); - result &= ~(3<<16); + /* mask with MAC supported features */ + phy_dev->supported &= PHY_BASIC_FEATURES; + phy_dev->advertising = phy_dev->supported; - spin_unlock_irqrestore(&ei_local->page_lock, flags); + ax->phy_dev = phy_dev; - if (phy_debug) - pr_debug("%s: %04x.%04x => read %04x\n", __func__, - phy_addr, reg, result); + netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); - return result; + return 0; } -static void -ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value) +static void ax_phy_switch(struct net_device *dev, int on) { - struct ei_device *ei = netdev_priv(dev); - struct ax_device *ax = to_ax_dev(dev); - unsigned long flags; - - dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n", - __func__, dev, phy_addr, reg, value); - - spin_lock_irqsave(&ei->page_lock, flags); - - ax_phy_issueaddr(dev, phy_addr, reg, 1); - ax_mii_ei_outbits(dev, 2, 2); /* send TA */ - ax_mii_ei_outbits(dev, value, 16); - - spin_unlock_irqrestore(&ei->page_lock, flags); -} + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); -static void ax_mii_expiry(unsigned long data) -{ - struct net_device *dev = (struct net_device *)data; - struct ax_device *ax = to_ax_dev(dev); - unsigned long flags; + u8 reg_gpoc = ax->plat->gpoc_val; - spin_lock_irqsave(&ax->mii_lock, flags); - mii_check_media(&ax->mii, netif_msg_link(ax), 0); - spin_unlock_irqrestore(&ax->mii_lock, flags); + if (!!on) + reg_gpoc &= ~AX_GPOC_PPDSET; + else + reg_gpoc |= AX_GPOC_PPDSET; - if (ax->running) { - ax->mii_timer.expires = jiffies + HZ*2; - add_timer(&ax->mii_timer); - } + ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17)); } static int ax_open(struct net_device *dev) { - struct ax_device *ax = to_ax_dev(dev); - struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); int ret; - dev_dbg(&ax->dev->dev, "%s: open\n", dev->name); + netdev_dbg(dev, "open\n"); ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, dev->name, dev); if (ret) - return ret; - - ret = ax_ei_open(dev); - if (ret) { - free_irq(dev->irq, 
dev); - return ret; - } + goto failed_request_irq; /* turn the phy on (if turned off) */ + ax_phy_switch(dev, 1); - ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17)); - ax->running = 1; - - /* start the MII timer */ - - init_timer(&ax->mii_timer); + ret = ax_mii_probe(dev); + if (ret) + goto failed_mii_probe; + phy_start(ax->phy_dev); - ax->mii_timer.expires = jiffies+1; - ax->mii_timer.data = (unsigned long) dev; - ax->mii_timer.function = ax_mii_expiry; + ret = ax_ei_open(dev); + if (ret) + goto failed_ax_ei_open; - add_timer(&ax->mii_timer); + ax->running = 1; return 0; + + failed_ax_ei_open: + phy_disconnect(ax->phy_dev); + failed_mii_probe: + ax_phy_switch(dev, 0); + free_irq(dev->irq, dev); + failed_request_irq: + return ret; } static int ax_close(struct net_device *dev) { struct ax_device *ax = to_ax_dev(dev); - struct ei_device *ei_local = netdev_priv(dev); - dev_dbg(&ax->dev->dev, "%s: close\n", dev->name); - - /* turn the phy off */ - - ei_outb(ax->plat->gpoc_val | (1<<6), - ei_local->mem + EI_SHIFT(0x17)); + netdev_dbg(dev, "close\n"); ax->running = 0; wmb(); - del_timer_sync(&ax->mii_timer); ax_ei_close(dev); + /* turn the phy off */ + ax_phy_switch(dev, 0); + phy_disconnect(ax->phy_dev); + free_irq(dev->irq, dev); return 0; } @@ -529,17 +452,15 @@ static int ax_close(struct net_device *dev) static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { struct ax_device *ax = to_ax_dev(dev); - unsigned long flags; - int rc; + struct phy_device *phy_dev = ax->phy_dev; if (!netif_running(dev)) return -EINVAL; - spin_lock_irqsave(&ax->mii_lock, flags); - rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL); - spin_unlock_irqrestore(&ax->mii_lock, flags); + if (!phy_dev) + return -ENODEV; - return rc; + return phy_mii_ioctl(phy_dev, req, cmd); } /* ethtool ops */ @@ -547,56 +468,40 @@ static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) static void ax_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { - struct ax_device *ax = to_ax_dev(dev); + struct platform_device *pdev = to_platform_device(dev->dev.parent); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); - strcpy(info->bus_info, ax->dev->name); + strcpy(info->bus_info, pdev->name); } static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ax_device *ax = to_ax_dev(dev); - unsigned long flags; + struct phy_device *phy_dev = ax->phy_dev; - spin_lock_irqsave(&ax->mii_lock, flags); - mii_ethtool_gset(&ax->mii, cmd); - spin_unlock_irqrestore(&ax->mii_lock, flags); + if (!phy_dev) + return -ENODEV; - return 0; + return phy_ethtool_gset(phy_dev, cmd); } static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ax_device *ax = to_ax_dev(dev); - unsigned long flags; - int rc; + struct phy_device *phy_dev = ax->phy_dev; - spin_lock_irqsave(&ax->mii_lock, flags); - rc = mii_ethtool_sset(&ax->mii, cmd); - spin_unlock_irqrestore(&ax->mii_lock, flags); - - return rc; -} - -static int ax_nway_reset(struct net_device *dev) -{ - struct ax_device *ax = to_ax_dev(dev); - return mii_nway_restart(&ax->mii); -} + if (!phy_dev) + return -ENODEV; -static u32 ax_get_link(struct net_device *dev) -{ - struct ax_device *ax = to_ax_dev(dev); - return mii_link_ok(&ax->mii); + return phy_ethtool_sset(phy_dev, cmd); } static const struct ethtool_ops ax_ethtool_ops = { .get_drvinfo = ax_get_drvinfo, .get_settings = ax_get_settings, .set_settings = ax_set_settings, - .nway_reset = ax_nway_reset, - .get_link = ax_get_link, + 
.get_link = ethtool_op_get_link, }; #ifdef CONFIG_AX88796_93CX6 @@ -640,37 +545,131 @@ static const struct net_device_ops ax_netdev_ops = { .ndo_get_stats = ax_ei_get_stats, .ndo_set_multicast_list = ax_ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ax_ei_poll, #endif }; +static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (level) + ax->reg_memr |= AX_MEMR_MDC; + else + ax->reg_memr &= ~AX_MEMR_MDC; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (output) + ax->reg_memr &= ~AX_MEMR_MDIR; + else + ax->reg_memr |= AX_MEMR_MDIR; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + + if (value) + ax->reg_memr |= AX_MEMR_MDO; + else + ax->reg_memr &= ~AX_MEMR_MDO; + + ei_outb(ax->reg_memr, ax->addr_memr); +} + +static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) +{ + struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); + int reg_memr = ei_inb(ax->addr_memr); + + return reg_memr & AX_MEMR_MDI ? 1 : 0; +} + +static struct mdiobb_ops bb_ops = { + .owner = THIS_MODULE, + .set_mdc = ax_bb_mdc, + .set_mdio_dir = ax_bb_dir, + .set_mdio_data = ax_bb_set_data, + .get_mdio_data = ax_bb_get_data, +}; + /* setup code */ +static int ax_mii_init(struct net_device *dev) +{ + struct platform_device *pdev = to_platform_device(dev->dev.parent); + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + int err, i; + + ax->bb_ctrl.ops = &bb_ops; + ax->addr_memr = ei_local->mem + AX_MEMR; + ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl); + if (!ax->mii_bus) { + err = -ENOMEM; + goto out; + } + + ax->mii_bus->name = "ax88796_mii_bus"; + ax->mii_bus->parent = dev->dev.parent; + snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); + + ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!ax->mii_bus->irq) { + err = -ENOMEM; + goto out_free_mdio_bitbang; + } + + for (i = 0; i < PHY_MAX_ADDR; i++) + ax->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(ax->mii_bus); + if (err) + goto out_free_irq; + + return 0; + + out_free_irq: + kfree(ax->mii_bus->irq); + out_free_mdio_bitbang: + free_mdio_bitbang(ax->mii_bus); + out: + return err; +} + static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) { void __iomem *ioaddr = ei_local->mem; struct ax_device *ax = to_ax_dev(dev); - /* Select page 0*/ - ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD); + /* Select page 0 */ + ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD); /* set to byte access */ ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG); ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17)); } -/* ax_init_dev +/* + * ax_init_dev * * initialise the specified device, taking care to note the MAC * address it may already have (if configured), ensure * the device is ready to be used by lib8390.c and registerd with * the network layer. 
*/ - -static int ax_init_dev(struct net_device *dev, int first_init) +static int ax_init_dev(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); struct ax_device *ax = to_ax_dev(dev); @@ -690,23 +689,23 @@ static int ax_init_dev(struct net_device *dev, int first_init) /* read the mac from the card prom if we need it */ - if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) { + if (ax->plat->flags & AXFLG_HAS_EEPROM) { unsigned char SA_prom[32]; - for(i = 0; i < sizeof(SA_prom); i+=2) { + for (i = 0; i < sizeof(SA_prom); i += 2) { SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); - SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT); + SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT); } if (ax->plat->wordlength == 2) for (i = 0; i < 16; i++) SA_prom[i] = SA_prom[i+i]; - memcpy(dev->dev_addr, SA_prom, 6); + memcpy(dev->dev_addr, SA_prom, 6); } #ifdef CONFIG_AX88796_93CX6 - if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) { + if (ax->plat->flags & AXFLG_HAS_93CX6) { unsigned char mac_addr[6]; struct eeprom_93cx6 eeprom; @@ -719,7 +718,7 @@ static int ax_init_dev(struct net_device *dev, int first_init) (__le16 __force *)mac_addr, sizeof(mac_addr) >> 1); - memcpy(dev->dev_addr, mac_addr, 6); + memcpy(dev->dev_addr, mac_addr, 6); } #endif if (ax->plat->wordlength == 2) { @@ -732,67 +731,56 @@ static int ax_init_dev(struct net_device *dev, int first_init) stop_page = NE1SM_STOP_PG; } - /* load the mac-address from the device if this is the - * first time we've initialised */ - - if (first_init) { - if (ax->plat->flags & AXFLG_MAC_FROMDEV) { - ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, - ei_local->mem + E8390_CMD); /* 0x61 */ - for (i = 0; i < ETHER_ADDR_LEN; i++) - dev->dev_addr[i] = - ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); - } - - if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && - ax->plat->mac_addr) - memcpy(dev->dev_addr, ax->plat->mac_addr, - ETHER_ADDR_LEN); + /* load the mac-address from the device */ + if (ax->plat->flags & AXFLG_MAC_FROMDEV) { + ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, + ei_local->mem + E8390_CMD); /* 0x61 */ + for (i = 0; i < ETHER_ADDR_LEN; i++) + dev->dev_addr[i] = + ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); } + if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && + ax->plat->mac_addr) + memcpy(dev->dev_addr, ax->plat->mac_addr, + ETHER_ADDR_LEN); + ax_reset_8390(dev); - ei_status.name = "AX88796"; - ei_status.tx_start_page = start_page; - ei_status.stop_page = stop_page; - ei_status.word16 = (ax->plat->wordlength == 2); - ei_status.rx_start_page = start_page + TX_PAGES; + ei_local->name = "AX88796"; + ei_local->tx_start_page = start_page; + ei_local->stop_page = stop_page; + ei_local->word16 = (ax->plat->wordlength == 2); + ei_local->rx_start_page = start_page + TX_PAGES; #ifdef PACKETBUF_MEMSIZE - /* Allow the packet buffer size to be overridden by know-it-alls. */ - ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; + /* Allow the packet buffer size to be overridden by know-it-alls. 
*/ + ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE; #endif - ei_status.reset_8390 = &ax_reset_8390; - ei_status.block_input = &ax_block_input; - ei_status.block_output = &ax_block_output; - ei_status.get_8390_hdr = &ax_get_8390_hdr; - ei_status.priv = 0; - - dev->netdev_ops = &ax_netdev_ops; - dev->ethtool_ops = &ax_ethtool_ops; - - ax->msg_enable = NETIF_MSG_LINK; - ax->mii.phy_id_mask = 0x1f; - ax->mii.reg_num_mask = 0x1f; - ax->mii.phy_id = 0x10; /* onboard phy */ - ax->mii.force_media = 0; - ax->mii.full_duplex = 0; - ax->mii.mdio_read = ax_phy_read; - ax->mii.mdio_write = ax_phy_write; - ax->mii.dev = dev; + ei_local->reset_8390 = &ax_reset_8390; + ei_local->block_input = &ax_block_input; + ei_local->block_output = &ax_block_output; + ei_local->get_8390_hdr = &ax_get_8390_hdr; + ei_local->priv = 0; - ax_NS8390_init(dev, 0); + dev->netdev_ops = &ax_netdev_ops; + dev->ethtool_ops = &ax_ethtool_ops; - if (first_init) - dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n", - ei_status.word16 ? 16:8, dev->irq, dev->base_addr, - dev->dev_addr); + ret = ax_mii_init(dev); + if (ret) + goto out_irq; + + ax_NS8390_init(dev, 0); ret = register_netdev(dev); if (ret) goto out_irq; + netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", + ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr, + dev->dev_addr); + return 0; out_irq: @@ -802,24 +790,24 @@ static int ax_init_dev(struct net_device *dev, int first_init) return ret; } -static int ax_remove(struct platform_device *_dev) +static int ax_remove(struct platform_device *pdev) { - struct net_device *dev = platform_get_drvdata(_dev); - struct ax_device *ax; - - ax = to_ax_dev(dev); + struct net_device *dev = platform_get_drvdata(pdev); + struct ei_device *ei_local = netdev_priv(dev); + struct ax_device *ax = to_ax_dev(dev); + struct resource *mem; unregister_netdev(dev); free_irq(dev->irq, dev); - iounmap(ei_status.mem); - release_resource(ax->mem); - kfree(ax->mem); + iounmap(ei_local->mem); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); if (ax->map2) { iounmap(ax->map2); - release_resource(ax->mem2); - kfree(ax->mem2); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + release_mem_region(mem->start, resource_size(mem)); } free_netdev(dev); @@ -827,19 +815,20 @@ static int ax_remove(struct platform_device *_dev) return 0; } -/* ax_probe +/* + * ax_probe * * This is the entry point when the platform device system uses to - * notify us of a new device to attach to. Allocate memory, find - * the resources and information passed, and map the necessary registers. -*/ - + * notify us of a new device to attach to. Allocate memory, find the + * resources and information passed, and map the necessary registers. 
+ */ static int ax_probe(struct platform_device *pdev) { struct net_device *dev; - struct ax_device *ax; - struct resource *res; - size_t size; + struct ei_device *ei_local; + struct ax_device *ax; + struct resource *irq, *mem, *mem2; + resource_size_t mem_size, mem2_size = 0; int ret = 0; dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); @@ -847,120 +836,107 @@ static int ax_probe(struct platform_device *pdev) return -ENOMEM; /* ok, let's setup our device */ + SET_NETDEV_DEV(dev, &pdev->dev); + ei_local = netdev_priv(dev); ax = to_ax_dev(dev); - memset(ax, 0, sizeof(struct ax_device)); - - spin_lock_init(&ax->mii_lock); - - ax->dev = pdev; ax->plat = pdev->dev.platform_data; platform_set_drvdata(pdev, dev); - ei_status.rxcr_base = ax->plat->rcr_val; + ei_local->rxcr_base = ax->plat->rcr_val; /* find the platform resources */ - - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (res == NULL) { + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq) { dev_err(&pdev->dev, "no IRQ specified\n"); ret = -ENXIO; goto exit_mem; } - dev->irq = res->start; - ax->irqflags = res->flags & IRQF_TRIGGER_MASK; + dev->irq = irq->start; + ax->irqflags = irq->flags & IRQF_TRIGGER_MASK; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { dev_err(&pdev->dev, "no MEM specified\n"); ret = -ENXIO; goto exit_mem; } - size = (res->end - res->start) + 1; - - /* setup the register offsets from either the platform data - * or by using the size of the resource provided */ + mem_size = resource_size(mem); + /* + * setup the register offsets from either the platform data or + * by using the size of the resource provided + */ if (ax->plat->reg_offsets) - ei_status.reg_offset = ax->plat->reg_offsets; + ei_local->reg_offset = ax->plat->reg_offsets; else { - ei_status.reg_offset = ax->reg_offsets; + ei_local->reg_offset = ax->reg_offsets; for (ret = 0; ret < 0x18; ret++) - ax->reg_offsets[ret] = (size / 0x18) * ret; + ax->reg_offsets[ret] = (mem_size / 0x18) * ret; } - ax->mem = request_mem_region(res->start, size, pdev->name); - if (ax->mem == NULL) { + if (!request_mem_region(mem->start, mem_size, pdev->name)) { dev_err(&pdev->dev, "cannot reserve registers\n"); - ret = -ENXIO; + ret = -ENXIO; goto exit_mem; } - ei_status.mem = ioremap(res->start, size); - dev->base_addr = (unsigned long)ei_status.mem; + ei_local->mem = ioremap(mem->start, mem_size); + dev->base_addr = (unsigned long)ei_local->mem; - if (ei_status.mem == NULL) { - dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n", - (unsigned long long)res->start, - (unsigned long long)res->end); + if (ei_local->mem == NULL) { + dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem); - ret = -ENXIO; + ret = -ENXIO; goto exit_req; } /* look for reset area */ - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res == NULL) { + mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!mem2) { if (!ax->plat->reg_offsets) { for (ret = 0; ret < 0x20; ret++) - ax->reg_offsets[ret] = (size / 0x20) * ret; + ax->reg_offsets[ret] = (mem_size / 0x20) * ret; } - - ax->map2 = NULL; } else { - size = (res->end - res->start) + 1; + mem2_size = resource_size(mem2); - ax->mem2 = request_mem_region(res->start, size, pdev->name); - if (ax->mem2 == NULL) { + if (!request_mem_region(mem2->start, mem2_size, pdev->name)) { dev_err(&pdev->dev, "cannot reserve registers\n"); ret = -ENXIO; goto exit_mem1; } - ax->map2 = ioremap(res->start, size); - if 
(ax->map2 == NULL) { + ax->map2 = ioremap(mem2->start, mem2_size); + if (!ax->map2) { dev_err(&pdev->dev, "cannot map reset register\n"); ret = -ENXIO; goto exit_mem2; } - ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem; + ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem; } /* got resources, now initialise and register device */ - - ret = ax_init_dev(dev, 1); + ret = ax_init_dev(dev); if (!ret) return 0; - if (ax->map2 == NULL) + if (!ax->map2) goto exit_mem1; iounmap(ax->map2); exit_mem2: - release_resource(ax->mem2); - kfree(ax->mem2); + release_mem_region(mem2->start, mem2_size); exit_mem1: - iounmap(ei_status.mem); + iounmap(ei_local->mem); exit_req: - release_resource(ax->mem); - kfree(ax->mem); + release_mem_region(mem->start, mem_size); exit_mem: free_netdev(dev); @@ -974,7 +950,7 @@ static int ax_probe(struct platform_device *pdev) static int ax_suspend(struct platform_device *dev, pm_message_t state) { struct net_device *ndev = platform_get_drvdata(dev); - struct ax_device *ax = to_ax_dev(ndev); + struct ax_device *ax = to_ax_dev(ndev); ax->resume_open = ax->running; @@ -987,7 +963,7 @@ static int ax_suspend(struct platform_device *dev, pm_message_t state) static int ax_resume(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - struct ax_device *ax = to_ax_dev(ndev); + struct ax_device *ax = to_ax_dev(ndev); ax_initial_setup(ndev, netdev_priv(ndev)); ax_NS8390_init(ndev, ax->resume_open); @@ -1001,7 +977,7 @@ static int ax_resume(struct platform_device *pdev) #else #define ax_suspend NULL -#define ax_resume NULL +#define ax_resume NULL #endif static struct platform_driver axdrv = { diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index add0b93350dd..f803c58b941d 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,11 +8,11 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. 
Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #ifndef BE_H @@ -33,7 +33,7 @@ #include "be_hw.h" -#define DRV_VER "2.103.175u" +#define DRV_VER "4.0.100u" #define DRV_NAME "be2net" #define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" #define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" @@ -67,7 +67,7 @@ static inline char *nic_name(struct pci_dev *pdev) } /* Number of bytes of an RX frame that are copied to skb->data */ -#define BE_HDR_LEN 64 +#define BE_HDR_LEN ((u16) 64) #define BE_MAX_JUMBO_FRAME_SIZE 9018 #define BE_MIN_MTU 256 @@ -211,18 +211,40 @@ struct be_rx_stats { u32 rx_fps; /* Rx frags per second */ }; +struct be_rx_compl_info { + u32 rss_hash; + u16 vid; + u16 pkt_size; + u16 rxq_idx; + u16 mac_id; + u8 vlanf; + u8 num_rcvd; + u8 err; + u8 ipf; + u8 tcpf; + u8 udpf; + u8 ip_csum; + u8 l4_csum; + u8 ipv6; + u8 vtm; + u8 pkt_type; +}; + struct be_rx_obj { struct be_adapter *adapter; struct be_queue_info q; struct be_queue_info cq; + struct be_rx_compl_info rxcp; struct be_rx_page_info page_info_tbl[RX_Q_LEN]; struct be_eq_obj rx_eq; struct be_rx_stats stats; u8 rss_id; bool rx_post_starved; /* Zero rx frags have been posted to BE */ - u16 last_frag_index; - u16 rsvd; - u32 cache_line_barrier[15]; + u32 cache_line_barrier[16]; +}; + +struct be_drv_stats { + u8 be_on_die_temperature; }; struct be_vf_cfg { @@ -234,6 +256,7 @@ struct be_vf_cfg { }; #define BE_INVALID_PMAC_ID 0xffffffff + struct be_adapter { struct pci_dev *pdev; struct net_device *netdev; @@ -269,6 +292,7 @@ struct be_adapter { u32 big_page_size; /* Compounded page size shared by rx wrbs */ u8 msix_vec_next_idx; + struct be_drv_stats drv_stats; struct vlan_group *vlan_grp; u16 vlans_added; @@ -281,6 +305,7 @@ struct be_adapter { struct be_dma_mem stats_cmd; /* Work queue used to perform periodic tasks like getting statistics */ struct delayed_work work; + u16 work_counter; /* Ethtool knobs and info */ bool rx_csum; /* BE card must perform rx-checksumming */ @@ -298,7 +323,7 @@ struct be_adapter { u32 rx_fc; /* Rx flow control */ u32 tx_fc; /* Tx flow control */ bool ue_detected; - bool stats_ioctl_sent; + bool stats_cmd_sent; int link_speed; u8 port_type; u8 transceiver; @@ -307,10 +332,13 @@ struct be_adapter { u32 flash_status; struct completion flash_compl; + bool be3_native; bool sriov_enabled; struct be_vf_cfg vf_cfg[BE_MAX_VF]; u8 is_virtfn; u32 sli_family; + u8 hba_port_num; + u16 pvid; }; #define be_physfn(adapter) (!adapter->is_virtfn) @@ -450,9 +478,8 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) mac[5] = (u8)(addr & 0xFF); mac[4] = (u8)((addr >> 8) & 0xFF); mac[3] = (u8)((addr >> 16) & 0xFF); - mac[2] = 0xC9; - mac[1] = 0x00; - mac[0] = 0x00; + /* Use the OUI from the current MAC address */ + memcpy(mac, adapter->netdev->dev_addr, 3); } extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index a179cc6d79f2..5a4a87e7c5ea 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,21 +8,30 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. 
Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #include "be.h" #include "be_cmds.h" +/* Must be a power of 2 or else MODULO will BUG_ON */ +static int be_get_temp_freq = 32; + static void be_mcc_notify(struct be_adapter *adapter) { struct be_queue_info *mccq = &adapter->mcc_obj.q; u32 val = 0; + if (adapter->eeh_err) { + dev_info(&adapter->pdev->dev, + "Error in Card Detected! Cannot issue commands\n"); + return; + } + val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; @@ -75,7 +84,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter, be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); netdev_stats_update(adapter); - adapter->stats_ioctl_sent = false; + adapter->stats_cmd_sent = false; } } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) && (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) { @@ -102,6 +111,7 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, { if (evt->valid) { adapter->vlan_prio_bmap = evt->available_priority_bmap; + adapter->recommended_prio &= ~VLAN_PRIO_MASK; adapter->recommended_prio = evt->reco_default_priority << VLAN_PRIO_SHIFT; } @@ -117,6 +127,16 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, } } +/*Grp5 PVID evt*/ +static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, + struct be_async_event_grp5_pvid_state *evt) +{ + if (evt->enabled) + adapter->pvid = evt->tag; + else + adapter->pvid = 0; +} + static void be_async_grp5_evt_process(struct be_adapter *adapter, u32 trailer, struct be_mcc_compl *evt) { @@ -134,6 +154,10 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter, be_async_grp5_qos_speed_process(adapter, (struct be_async_event_grp5_qos_link_speed *)evt); break; + case ASYNC_EVENT_PVID_STATE: + be_async_grp5_pvid_state_process(adapter, + (struct be_async_event_grp5_pvid_state *)evt); + break; default: dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); break; @@ -216,6 +240,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) int i, num, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; + if (adapter->eeh_err) + return -EIO; + for (i = 0; i < mcc_timeout; i++) { num = be_process_mcc(adapter, &status); if (num) @@ -245,6 +272,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) int msecs = 0; u32 ready; + if (adapter->eeh_err) { + dev_err(&adapter->pdev->dev, + "Error detected in card.Cannot issue commands\n"); + return -EIO; + } + do { ready = ioread32(db); if (ready == 0xffffffff) { @@ -598,7 +631,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, /* Uses synchronous MCCQ */ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, - u32 if_id, u32 *pmac_id) + u32 if_id, u32 *pmac_id, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_pmac_add *req; @@ -619,6 +652,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); + req->hdr.domain = domain; req->if_id = cpu_to_le32(if_id); memcpy(req->mac_address, mac_addr, ETH_ALEN); @@ -634,7 +668,7 @@ err: } /* Uses synchronous MCCQ */ -int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) +int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_pmac_del *req; @@ -655,6 +689,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) 
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); + req->hdr.domain = dom; req->if_id = cpu_to_le32(if_id); req->pmac_id = cpu_to_le32(pmac_id); @@ -691,7 +726,7 @@ int be_cmd_cq_create(struct be_adapter *adapter, req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); if (lancer_chip(adapter)) { - req->hdr.version = 1; + req->hdr.version = 2; req->page_size = 1; /* 1 for 4K */ AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt, coalesce_wm); @@ -827,6 +862,12 @@ int be_cmd_txq_create(struct be_adapter *adapter, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, sizeof(*req)); + if (lancer_chip(adapter)) { + req->hdr.version = 1; + AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt, + adapter->if_handle); + } + req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); req->ulp_num = BE_ULP1_NUM; req->type = BE_ETH_TX_RING_TYPE_STANDARD; @@ -995,7 +1036,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, } /* Uses mbox */ -int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) +int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_if_destroy *req; @@ -1016,6 +1057,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); + req->hdr.domain = domain; req->interface_id = cpu_to_le32(interface_id); status = be_mbox_notify_wait(adapter); @@ -1036,6 +1078,9 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) struct be_sge *sge; int status = 0; + if (MODULO(adapter->work_counter, be_get_temp_freq) == 0) + be_cmd_get_die_temperature(adapter); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -1056,7 +1101,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) sge->len = cpu_to_le32(nonemb_cmd->size); be_mcc_notify(adapter); - adapter->stats_ioctl_sent = true; + adapter->stats_cmd_sent = true; err: spin_unlock_bh(&adapter->mcc_lock); @@ -1103,6 +1148,44 @@ err: return status; } +/* Uses synchronous mcc */ +int be_cmd_get_die_temperature(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_get_cntl_addnl_attribs *req; + int status; + + spin_lock_bh(&adapter->mcc_lock); + + wrb = wrb_from_mccq(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = embedded_payload(wrb); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, + OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req)); + + status = be_mcc_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_get_cntl_addnl_attribs *resp = + embedded_payload(wrb); + adapter->drv_stats.be_on_die_temperature = + resp->on_die_temperature; + } + /* If IOCTL fails once, do not bother issuing it again */ + else + be_get_temp_freq = 0; + +err: + spin_unlock_bh(&adapter->mcc_lock); + return status; +} + /* Uses Mbox */ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) { @@ -1868,8 +1951,8 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) OPCODE_COMMON_SET_QOS, sizeof(*req)); req->hdr.domain = domain; - req->valid_bits = BE_QOS_BITS_NIC; - req->max_bps_nic = bps; + req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); + req->max_bps_nic = cpu_to_le32(bps); status = be_mcc_notify_wait(adapter); 
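The be_get_temp_freq value introduced above is required to be a power of two for the MODULO check on adapter->work_counter that gates be_cmd_get_die_temperature(). One common reason for such a constraint is that, for a power-of-two n, x % n can be computed as x & (n - 1); the following is a minimal standalone sketch of that identity only (illustrative names, not the driver's MODULO macro or part of this patch):

/* Standalone illustration: for a power-of-two n, (x % n) == (x & (n - 1)),
 * so a periodic "every n-th iteration" check can avoid a division.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int mod_pow2(unsigned int x, unsigned int n)
{
	assert(n != 0 && (n & (n - 1)) == 0);	/* n must be a power of two */
	return x & (n - 1);
}

int main(void)
{
	unsigned int x, n = 32;	/* 32 mirrors the default be_get_temp_freq */

	for (x = 0; x < 100; x++)
		assert(mod_pow2(x, n) == x % n);

	printf("x %% %u == (x & %u) for all tested x\n", n, n - 1);
	return 0;
}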
@@ -1877,3 +1960,96 @@ err: spin_unlock_bh(&adapter->mcc_lock); return status; } + +int be_cmd_get_cntl_attributes(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_cntl_attribs *req; + struct be_cmd_resp_cntl_attribs *resp; + struct be_sge *sge; + int status; + int payload_len = max(sizeof(*req), sizeof(*resp)); + struct mgmt_controller_attrib *attribs; + struct be_dma_mem attribs_cmd; + + memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); + attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); + attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, + &attribs_cmd.dma); + if (!attribs_cmd.va) { + dev_err(&adapter->pdev->dev, + "Memory allocation failure\n"); + return -ENOMEM; + } + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + req = attribs_cmd.va; + sge = nonembedded_sgl(wrb); + + be_wrb_hdr_prepare(wrb, payload_len, false, 1, + OPCODE_COMMON_GET_CNTL_ATTRIBUTES); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len); + sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma)); + sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF); + sge->len = cpu_to_le32(attribs_cmd.size); + + status = be_mbox_notify_wait(adapter); + if (!status) { + attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va + + sizeof(struct be_cmd_resp_hdr)); + adapter->hba_port_num = attribs->hba_attribs.phy_port; + } + +err: + mutex_unlock(&adapter->mbox_lock); + pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, + attribs_cmd.dma); + return status; +} + +/* Uses mbox */ +int be_cmd_check_native_mode(struct be_adapter *adapter) +{ + struct be_mcc_wrb *wrb; + struct be_cmd_req_set_func_cap *req; + int status; + + if (mutex_lock_interruptible(&adapter->mbox_lock)) + return -1; + + wrb = wrb_from_mbox(adapter); + if (!wrb) { + status = -EBUSY; + goto err; + } + + req = embedded_payload(wrb); + + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, + OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP); + + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req)); + + req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | + CAPABILITY_BE3_NATIVE_ERX_API); + req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); + + status = be_mbox_notify_wait(adapter); + if (!status) { + struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); + adapter->be3_native = le32_to_cpu(resp->cap_flags) & + CAPABILITY_BE3_NATIVE_ERX_API; + } +err: + mutex_unlock(&adapter->mbox_lock); + return status; +} diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index 83d15c8a9fa3..4f254cfaabe2 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,11 +8,11 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. 
Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ /* @@ -88,6 +88,7 @@ struct be_mcc_compl { #define ASYNC_EVENT_CODE_GRP_5 0x5 #define ASYNC_EVENT_QOS_SPEED 0x1 #define ASYNC_EVENT_COS_PRIORITY 0x2 +#define ASYNC_EVENT_PVID_STATE 0x3 struct be_async_event_trailer { u32 code; }; @@ -134,6 +135,18 @@ struct be_async_event_grp5_cos_priority { struct be_async_event_trailer trailer; } __packed; +/* When the event code of an async trailer is GRP5 and event type is + * PVID state, the mcc_compl must be interpreted as follows + */ +struct be_async_event_grp5_pvid_state { + u8 enabled; + u8 rsvd0; + u16 tag; + u32 event_tag; + u32 rsvd1; + struct be_async_event_trailer trailer; +} __packed; + struct be_mcc_mailbox { struct be_mcc_wrb wrb; struct be_mcc_compl compl; @@ -156,6 +169,7 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_SET_QOS 28 #define OPCODE_COMMON_MCC_CREATE_EXT 90 #define OPCODE_COMMON_SEEPROM_READ 30 +#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES 32 #define OPCODE_COMMON_NTWK_RX_FILTER 34 #define OPCODE_COMMON_GET_FW_VERSION 35 #define OPCODE_COMMON_SET_FLOW_CONTROL 36 @@ -176,6 +190,8 @@ struct be_mcc_mailbox { #define OPCODE_COMMON_GET_BEACON_STATE 70 #define OPCODE_COMMON_READ_TRANSRECV_DATA 73 #define OPCODE_COMMON_GET_PHY_DETAILS 102 +#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 +#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 #define OPCODE_ETH_RSS_CONFIG 1 #define OPCODE_ETH_ACPI_CONFIG 2 @@ -415,7 +431,7 @@ struct be_cmd_resp_mcc_create { /* Pseudo amap definition in which each bit of the actual structure is defined * as a byte: used to calculate offset/shift/mask of each field */ struct amap_tx_context { - u8 rsvd0[16]; /* dword 0 */ + u8 if_id[16]; /* dword 0 */ u8 tx_ring_size[4]; /* dword 0 */ u8 rsvd1[26]; /* dword 0 */ u8 pci_func_id[8]; /* dword 1 */ @@ -503,7 +519,8 @@ enum be_if_flags { BE_IF_FLAGS_VLAN = 0x100, BE_IF_FLAGS_MCAST_PROMISCUOUS = 0x200, BE_IF_FLAGS_PASS_L2_ERRORS = 0x400, - BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800 + BE_IF_FLAGS_PASS_L3L4_ERRORS = 0x800, + BE_IF_FLAGS_MULTICAST = 0x1000 }; /* An RX interface is an object with one or more MAC addresses and @@ -619,7 +636,10 @@ struct be_rxf_stats { u32 rx_drops_invalid_ring; /* dword 145*/ u32 forwarded_packets; /* dword 146*/ u32 rx_drops_mtu; /* dword 147*/ - u32 rsvd0[15]; + u32 rsvd0[7]; + u32 port0_jabber_events; + u32 port1_jabber_events; + u32 rsvd1[6]; }; struct be_erx_stats { @@ -630,11 +650,16 @@ struct be_erx_stats { u32 debug_pmem_pbuf_dealloc; /* dword 47*/ }; +struct be_pmem_stats { + u32 eth_red_drops; + u32 rsvd[4]; +}; + struct be_hw_stats { struct be_rxf_stats rxf; u32 rsvd[48]; struct be_erx_stats erx; - u32 rsvd1[6]; + struct be_pmem_stats pmem; }; struct be_cmd_req_get_stats { @@ -647,6 +672,20 @@ struct be_cmd_resp_get_stats { struct be_hw_stats hw_stats; }; +struct be_cmd_req_get_cntl_addnl_attribs { + struct be_cmd_req_hdr hdr; + u8 rsvd[8]; +}; + +struct be_cmd_resp_get_cntl_addnl_attribs { + struct be_cmd_resp_hdr hdr; + u16 ipl_file_number; + u8 ipl_file_version; + u8 rsvd0; + u8 on_die_temperature; /* in degrees centigrade*/ + u8 rsvd1[3]; +}; + struct be_cmd_req_vlan_config { struct be_cmd_req_hdr hdr; u8 interface_id; @@ -994,17 +1033,47 @@ struct be_cmd_resp_set_qos { u32 rsvd; }; +/*********************** Controller Attributes ***********************/ +struct be_cmd_req_cntl_attribs { + struct be_cmd_req_hdr hdr; +}; + +struct be_cmd_resp_cntl_attribs { + struct be_cmd_resp_hdr hdr; + struct mgmt_controller_attrib 
attribs; +}; + +/*********************** Set driver function ***********************/ +#define CAPABILITY_SW_TIMESTAMPS 2 +#define CAPABILITY_BE3_NATIVE_ERX_API 4 + +struct be_cmd_req_set_func_cap { + struct be_cmd_req_hdr hdr; + u32 valid_cap_flags; + u32 cap_flags; + u8 rsvd[212]; +}; + +struct be_cmd_resp_set_func_cap { + struct be_cmd_resp_hdr hdr; + u32 valid_cap_flags; + u32 cap_flags; + u8 rsvd[212]; +}; + extern int be_pci_fnum_get(struct be_adapter *adapter); extern int be_cmd_POST(struct be_adapter *adapter); extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, u8 type, bool permanent, u32 if_handle); extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, - u32 if_id, u32 *pmac_id); -extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); + u32 if_id, u32 *pmac_id, u32 domain); +extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, + u32 pmac_id, u32 domain); extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id, u32 domain); -extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); +extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle, + u32 domain); extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_queue_info *eq, int eq_delay); extern int be_cmd_cq_create(struct be_adapter *adapter, @@ -1076,4 +1145,7 @@ extern int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd); extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); extern void be_detect_dump_ue(struct be_adapter *adapter); +extern int be_cmd_get_die_temperature(struct be_adapter *adapter); +extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter); +extern int be_cmd_check_native_mode(struct be_adapter *adapter); diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index b4be0271efe0..aac248fbd18b 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,11 +8,11 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. 
Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #include "be.h" @@ -26,7 +26,8 @@ struct be_ethtool_stat { int offset; }; -enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT}; +enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT, + PMEMSTAT, DRVSTAT}; #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ offsetof(_struct, field) #define NETSTAT_INFO(field) #field, NETSTAT,\ @@ -43,6 +44,11 @@ enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT}; field) #define ERXSTAT_INFO(field) #field, ERXSTAT,\ FIELDINFO(struct be_erx_stats, field) +#define PMEMSTAT_INFO(field) #field, PMEMSTAT,\ + FIELDINFO(struct be_pmem_stats, field) +#define DRVSTAT_INFO(field) #field, DRVSTAT,\ + FIELDINFO(struct be_drv_stats, \ + field) static const struct be_ethtool_stat et_stats[] = { {NETSTAT_INFO(rx_packets)}, @@ -99,7 +105,11 @@ static const struct be_ethtool_stat et_stats[] = { {MISCSTAT_INFO(rx_drops_too_many_frags)}, {MISCSTAT_INFO(rx_drops_invalid_ring)}, {MISCSTAT_INFO(forwarded_packets)}, - {MISCSTAT_INFO(rx_drops_mtu)} + {MISCSTAT_INFO(rx_drops_mtu)}, + {MISCSTAT_INFO(port0_jabber_events)}, + {MISCSTAT_INFO(port1_jabber_events)}, + {PMEMSTAT_INFO(eth_red_drops)}, + {DRVSTAT_INFO(be_on_die_temperature)} }; #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats) @@ -121,7 +131,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = { "MAC Loopback test", "PHY Loopback test", "External Loopback test", - "DDR DMA test" + "DDR DMA test", "Link test" }; @@ -276,6 +286,12 @@ be_get_ethtool_stats(struct net_device *netdev, case MISCSTAT: p = &hw_stats->rxf; break; + case PMEMSTAT: + p = &hw_stats->pmem; + break; + case DRVSTAT: + p = &adapter->drv_stats; + break; } p = (u8 *)p + et_stats[i].offset; @@ -376,8 +392,9 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info); - phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size, - &phy_cmd.dma); + phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, + phy_cmd.size, &phy_cmd.dma, + GFP_KERNEL); if (!phy_cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); return -ENOMEM; @@ -416,8 +433,8 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) adapter->port_type = ecmd->port; adapter->transceiver = ecmd->transceiver; adapter->autoneg = ecmd->autoneg; - pci_free_consistent(adapter->pdev, phy_cmd.size, - phy_cmd.va, phy_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va, + phy_cmd.dma); } else { ecmd->speed = adapter->link_speed; ecmd->port = adapter->port_type; @@ -496,7 +513,7 @@ be_phys_id(struct net_device *netdev, u32 data) int status; u32 cur; - be_cmd_get_beacon_state(adapter, adapter->port_num, &cur); + be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur); if (cur == BEACON_STATE_ENABLED) return 0; @@ -504,23 +521,34 @@ be_phys_id(struct net_device *netdev, u32 data) if (data < 2) data = 2; - status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0, + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, BEACON_STATE_ENABLED); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(data*HZ); - status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0, + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, BEACON_STATE_DISABLED); return status; } +static bool +be_is_wol_supported(struct be_adapter *adapter) +{ + if (!be_physfn(adapter)) + return 
false; + else + return true; +} + static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); - wol->supported = WAKE_MAGIC; + if (be_is_wol_supported(adapter)) + wol->supported = WAKE_MAGIC; + if (adapter->wol) wol->wolopts = WAKE_MAGIC; else @@ -536,7 +564,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; - if (wol->wolopts & WAKE_MAGIC) + if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter)) adapter->wol = true; else adapter->wol = false; @@ -554,8 +582,8 @@ be_test_ddr_dma(struct be_adapter *adapter) }; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); - ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, - &ddrdma_cmd.dma); + ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, + &ddrdma_cmd.dma, GFP_KERNEL); if (!ddrdma_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); return -ENOMEM; @@ -569,20 +597,20 @@ be_test_ddr_dma(struct be_adapter *adapter) } err: - pci_free_consistent(adapter->pdev, ddrdma_cmd.size, - ddrdma_cmd.va, ddrdma_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va, + ddrdma_cmd.dma); return ret; } static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, u64 *status) { - be_cmd_set_loopback(adapter, adapter->port_num, + be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1); - *status = be_cmd_loopback_test(adapter, adapter->port_num, + *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, loopback_type, 1500, 2, 0xabc); - be_cmd_set_loopback(adapter, adapter->port_num, + be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1); return *status; } @@ -621,7 +649,8 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) &qos_link_speed) != 0) { test->flags |= ETH_TEST_FL_FAILED; data[4] = -1; - } else if (mac_speed) { + } else if (!mac_speed) { + test->flags |= ETH_TEST_FL_FAILED; data[4] = 1; } } @@ -662,8 +691,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); - eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size, - &eeprom_cmd.dma); + eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, + &eeprom_cmd.dma, GFP_KERNEL); if (!eeprom_cmd.va) { dev_err(&adapter->pdev->dev, @@ -677,8 +706,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va; memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len); } - pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va, - eeprom_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va, + eeprom_cmd.dma); return status; } diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h index 4096d9778234..d4344a06090b 100644 --- a/drivers/net/benet/be_hw.h +++ b/drivers/net/benet/be_hw.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,11 +8,11 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. 
Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ /********* Mailbox door bell *************/ @@ -44,6 +44,18 @@ #define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */ #define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */ + +/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */ +#define SLIPORT_STATUS_OFFSET 0x404 +#define SLIPORT_CONTROL_OFFSET 0x408 + +#define SLIPORT_STATUS_ERR_MASK 0x80000000 +#define SLIPORT_STATUS_RN_MASK 0x01000000 +#define SLIPORT_STATUS_RDY_MASK 0x00800000 + + +#define SLI_PORT_CONTROL_IP_MASK 0x08000000 + /********* Memory BAR register ************/ #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc /* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt @@ -289,10 +301,10 @@ struct be_eth_rx_d { /* RX Compl Queue Descriptor */ -/* Pseudo amap definition for eth_rx_compl in which each bit of the - * actual structure is defined as a byte: used to calculate +/* Pseudo amap definition for BE2 and BE3 legacy mode eth_rx_compl in which + * each bit of the actual structure is defined as a byte: used to calculate * offset/shift/mask of each field */ -struct amap_eth_rx_compl { +struct amap_eth_rx_compl_v0 { u8 vlan_tag[16]; /* dword 0 */ u8 pktsize[14]; /* dword 0 */ u8 port; /* dword 0 */ @@ -323,10 +335,92 @@ struct amap_eth_rx_compl { u8 rsshash[32]; /* dword 3 */ } __packed; +/* Pseudo amap definition for BE3 native mode eth_rx_compl in which + * each bit of the actual structure is defined as a byte: used to calculate + * offset/shift/mask of each field */ +struct amap_eth_rx_compl_v1 { + u8 vlan_tag[16]; /* dword 0 */ + u8 pktsize[14]; /* dword 0 */ + u8 vtp; /* dword 0 */ + u8 ip_opt; /* dword 0 */ + u8 err; /* dword 1 */ + u8 rsshp; /* dword 1 */ + u8 ipf; /* dword 1 */ + u8 tcpf; /* dword 1 */ + u8 udpf; /* dword 1 */ + u8 ipcksm; /* dword 1 */ + u8 l4_cksm; /* dword 1 */ + u8 ip_version; /* dword 1 */ + u8 macdst[7]; /* dword 1 */ + u8 rsvd0; /* dword 1 */ + u8 fragndx[10]; /* dword 1 */ + u8 ct[2]; /* dword 1 */ + u8 sw; /* dword 1 */ + u8 numfrags[3]; /* dword 1 */ + u8 rss_flush; /* dword 2 */ + u8 cast_enc[2]; /* dword 2 */ + u8 vtm; /* dword 2 */ + u8 rss_bank; /* dword 2 */ + u8 port[2]; /* dword 2 */ + u8 vntagp; /* dword 2 */ + u8 header_len[8]; /* dword 2 */ + u8 header_split[2]; /* dword 2 */ + u8 rsvd1[13]; /* dword 2 */ + u8 valid; /* dword 2 */ + u8 rsshash[32]; /* dword 3 */ +} __packed; + struct be_eth_rx_compl { u32 dw[4]; }; +struct mgmt_hba_attribs { + u8 flashrom_version_string[32]; + u8 manufacturer_name[32]; + u32 supported_modes; + u32 rsvd0[3]; + u8 ncsi_ver_string[12]; + u32 default_extended_timeout; + u8 controller_model_number[32]; + u8 controller_description[64]; + u8 controller_serial_number[32]; + u8 ip_version_string[32]; + u8 firmware_version_string[32]; + u8 bios_version_string[32]; + u8 redboot_version_string[32]; + u8 driver_version_string[32]; + u8 fw_on_flash_version_string[32]; + u32 functionalities_supported; + u16 max_cdblength; + u8 asic_revision; + u8 generational_guid[16]; + u8 hba_port_count; + u16 default_link_down_timeout; + u8 iscsi_ver_min_max; + u8 multifunction_device; + u8 cache_valid; + u8 hba_status; + u8 max_domains_supported; + u8 phy_port; + u32 firmware_post_status; + u32 hba_mtu[8]; + u32 rsvd1[4]; +}; + +struct mgmt_controller_attrib { + struct mgmt_hba_attribs hba_attribs; + u16 pci_vendor_id; + u16 pci_device_id; + u16 pci_sub_vendor_id; + u16 pci_sub_system_id; + u8 pci_bus_number; + u8 pci_device_number; + u8 
pci_function_number; + u8 interface_type; + u64 unique_identifier; + u32 rsvd0[5]; +}; + struct controller_id { u32 vendor; u32 device; diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 28a32a6c8bf1..a71163f1e34b 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2010 ServerEngines + * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -8,11 +8,11 @@ * Public License is included in this distribution in the file called COPYING. * * Contact Information: - * linux-drivers@serverengines.com + * linux-drivers@emulex.com * - * ServerEngines - * 209 N. Fair Oaks Ave - * Sunnyvale, CA 94085 + * Emulex + * 3333 Susan Street + * Costa Mesa, CA 92626 */ #include "be.h" @@ -25,9 +25,9 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); MODULE_AUTHOR("ServerEngines Corporation"); MODULE_LICENSE("GPL"); -static unsigned int rx_frag_size = 2048; +static ushort rx_frag_size = 2048; static unsigned int num_vfs; -module_param(rx_frag_size, uint, S_IRUGO); +module_param(rx_frag_size, ushort, S_IRUGO); module_param(num_vfs, uint, S_IRUGO); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); @@ -125,8 +125,8 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; if (mem->va) - pci_free_consistent(adapter->pdev, mem->size, - mem->va, mem->dma); + dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, + mem->dma); } static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, @@ -138,7 +138,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; - mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma); + mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, + GFP_KERNEL); if (!mem->va) return -1; memset(mem->va, 0, mem->size); @@ -235,12 +236,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (!be_physfn(adapter)) goto netdev_addr; - status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); + status = be_cmd_pmac_del(adapter, adapter->if_handle, + adapter->pmac_id, 0); if (status) return status; status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, - adapter->if_handle, &adapter->pmac_id); + adapter->if_handle, &adapter->pmac_id, 0); netdev_addr: if (!status) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); @@ -484,7 +486,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); } -static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb, +static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, bool unmap_single) { dma_addr_t dma; @@ -494,11 +496,10 @@ static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb, dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; if (wrb->frag_len) { if (unmap_single) - pci_unmap_single(pdev, dma, wrb->frag_len, - PCI_DMA_TODEVICE); + dma_unmap_single(dev, dma, wrb->frag_len, + DMA_TO_DEVICE); else - pci_unmap_page(pdev, dma, wrb->frag_len, - PCI_DMA_TODEVICE); + dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE); } } @@ -507,7 +508,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, { dma_addr_t busaddr; int i, copied = 0; - struct pci_dev 
*pdev = adapter->pdev; + struct device *dev = &adapter->pdev->dev; struct sk_buff *first_skb = skb; struct be_queue_info *txq = &adapter->tx_obj.q; struct be_eth_wrb *wrb; @@ -521,9 +522,8 @@ static int make_tx_wrbs(struct be_adapter *adapter, if (skb->len > skb->data_len) { int len = skb_headlen(skb); - busaddr = pci_map_single(pdev, skb->data, len, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, busaddr)) + busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(dev, busaddr)) goto dma_err; map_single = true; wrb = queue_head_node(txq); @@ -536,10 +536,9 @@ static int make_tx_wrbs(struct be_adapter *adapter, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; - busaddr = pci_map_page(pdev, frag->page, - frag->page_offset, - frag->size, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, busaddr)) + busaddr = dma_map_page(dev, frag->page, frag->page_offset, + frag->size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, busaddr)) goto dma_err; wrb = queue_head_node(txq); wrb_fill(wrb, busaddr, frag->size); @@ -563,7 +562,7 @@ dma_err: txq->head = map_head; while (copied) { wrb = queue_head_node(txq); - unmap_tx_frag(pdev, wrb, map_single); + unmap_tx_frag(dev, wrb, map_single); map_single = false; copied -= wrb->frag_len; queue_head_inc(txq); @@ -743,11 +742,11 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) status = be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle, - adapter->vf_cfg[vf].vf_pmac_id); + adapter->vf_cfg[vf].vf_pmac_id, vf + 1); status = be_cmd_pmac_add(adapter, mac, adapter->vf_cfg[vf].vf_if_handle, - &adapter->vf_cfg[vf].vf_pmac_id); + &adapter->vf_cfg[vf].vf_pmac_id, vf + 1); if (status) dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", @@ -822,7 +821,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev, rate = 10000; adapter->vf_cfg[vf].vf_tx_rate = rate; - status = be_cmd_set_qos(adapter, rate / 10, vf); + status = be_cmd_set_qos(adapter, rate / 10, vf + 1); if (status) dev_info(&adapter->pdev->dev, @@ -852,28 +851,26 @@ static void be_rx_rate_update(struct be_rx_obj *rxo) } static void be_rx_stats_update(struct be_rx_obj *rxo, - u32 pktsize, u16 numfrags, u8 pkt_type) + struct be_rx_compl_info *rxcp) { struct be_rx_stats *stats = &rxo->stats; stats->rx_compl++; - stats->rx_frags += numfrags; - stats->rx_bytes += pktsize; + stats->rx_frags += rxcp->num_rcvd; + stats->rx_bytes += rxcp->pkt_size; stats->rx_pkts++; - if (pkt_type == BE_MULTICAST_PACKET) + if (rxcp->pkt_type == BE_MULTICAST_PACKET) stats->rx_mcast_pkts++; + if (rxcp->err) + stats->rxcp_err++; } -static inline bool csum_passed(struct be_eth_rx_compl *rxcp) +static inline bool csum_passed(struct be_rx_compl_info *rxcp) { - u8 l4_cksm, ipv6, ipcksm; - - l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp); - ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp); - ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp); - - /* Ignore ipcksm for ipv6 pkts */ - return l4_cksm && (ipcksm || ipv6); + /* L4 checksum is not reliable for non TCP/UDP packets. 
+ * Also ignore ipcksm for ipv6 pkts */ + return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum && + (rxcp->ip_csum || rxcp->ipv6); } static struct be_rx_page_info * @@ -888,8 +885,9 @@ get_rx_page_info(struct be_adapter *adapter, BUG_ON(!rx_page_info->page); if (rx_page_info->last_page_user) { - pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus), - adapter->big_page_size, PCI_DMA_FROMDEVICE); + dma_unmap_page(&adapter->pdev->dev, + dma_unmap_addr(rx_page_info, bus), + adapter->big_page_size, DMA_FROM_DEVICE); rx_page_info->last_page_user = false; } @@ -900,26 +898,17 @@ get_rx_page_info(struct be_adapter *adapter, /* Throwaway the data in the Rx completion */ static void be_rx_compl_discard(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct be_eth_rx_compl *rxcp) + struct be_rx_compl_info *rxcp) { struct be_queue_info *rxq = &rxo->q; struct be_rx_page_info *page_info; - u16 rxq_idx, i, num_rcvd; - - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); + u16 i, num_rcvd = rxcp->num_rcvd; - /* Skip out-of-buffer compl(lancer) or flush compl(BE) */ - if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) { - - rxo->last_frag_index = rxq_idx; - - for (i = 0; i < num_rcvd; i++) { - page_info = get_rx_page_info(adapter, rxo, rxq_idx); - put_page(page_info->page); - memset(page_info, 0, sizeof(*page_info)); - index_inc(&rxq_idx, rxq->len); - } + for (i = 0; i < num_rcvd; i++) { + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); + put_page(page_info->page); + memset(page_info, 0, sizeof(*page_info)); + index_inc(&rxcp->rxq_idx, rxq->len); } } @@ -928,30 +917,23 @@ static void be_rx_compl_discard(struct be_adapter *adapter, * indicated by rxcp. 
*/ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct sk_buff *skb, struct be_eth_rx_compl *rxcp, - u16 num_rcvd) + struct sk_buff *skb, struct be_rx_compl_info *rxcp) { struct be_queue_info *rxq = &rxo->q; struct be_rx_page_info *page_info; - u16 rxq_idx, i, j; - u32 pktsize, hdr_len, curr_frag_len, size; + u16 i, j; + u16 hdr_len, curr_frag_len, remaining; u8 *start; - u8 pkt_type; - - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); - pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); - pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); - - page_info = get_rx_page_info(adapter, rxo, rxq_idx); + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); start = page_address(page_info->page) + page_info->page_offset; prefetch(start); /* Copy data in the first descriptor of this completion */ - curr_frag_len = min(pktsize, rx_frag_size); + curr_frag_len = min(rxcp->pkt_size, rx_frag_size); /* Copy the header portion into skb_data */ - hdr_len = min((u32)BE_HDR_LEN, curr_frag_len); + hdr_len = min(BE_HDR_LEN, curr_frag_len); memcpy(skb->data, start, hdr_len); skb->len = curr_frag_len; if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ @@ -970,19 +952,17 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, } page_info->page = NULL; - if (pktsize <= rx_frag_size) { - BUG_ON(num_rcvd != 1); - goto done; + if (rxcp->pkt_size <= rx_frag_size) { + BUG_ON(rxcp->num_rcvd != 1); + return; } /* More frags present for this completion */ - size = pktsize; - for (i = 1, j = 0; i < num_rcvd; i++) { - size -= curr_frag_len; - index_inc(&rxq_idx, rxq->len); - page_info = get_rx_page_info(adapter, rxo, rxq_idx); - - curr_frag_len = min(size, rx_frag_size); + index_inc(&rxcp->rxq_idx, rxq->len); + remaining = rxcp->pkt_size - curr_frag_len; + for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); + curr_frag_len = min(remaining, rx_frag_size); /* Coalesce all frags from the same physical page in one slot */ if (page_info->page_offset == 0) { @@ -1001,25 +981,19 @@ static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo, skb->len += curr_frag_len; skb->data_len += curr_frag_len; + remaining -= curr_frag_len; + index_inc(&rxcp->rxq_idx, rxq->len); page_info->page = NULL; } BUG_ON(j > MAX_SKB_FRAGS); - -done: - be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type); } /* Process the RX completion indicated by rxcp when GRO is disabled */ static void be_rx_compl_process(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct be_eth_rx_compl *rxcp) + struct be_rx_compl_info *rxcp) { struct sk_buff *skb; - u32 vlanf, vid; - u16 num_rcvd; - u8 vtm; - - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); if (unlikely(!skb)) { @@ -1029,7 +1003,7 @@ static void be_rx_compl_process(struct be_adapter *adapter, return; } - skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd); + skb_fill_rx_data(adapter, rxo, skb, rxcp); if (likely(adapter->rx_csum && csum_passed(rxcp))) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1039,23 +1013,12 @@ static void be_rx_compl_process(struct be_adapter *adapter, skb->truesize = skb->len + sizeof(struct sk_buff); skb->protocol = eth_type_trans(skb, adapter->netdev); - vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); - vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); - - /* vlanf could be wrongly set in 
some cards. - * ignore if vtm is not set */ - if ((adapter->function_mode & 0x400) && !vtm) - vlanf = 0; - - if (unlikely(vlanf)) { + if (unlikely(rxcp->vlanf)) { if (!adapter->vlan_grp || adapter->vlans_added == 0) { kfree_skb(skb); return; } - vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - if (!lancer_chip(adapter)) - vid = swab16(vid); - vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid); } else { netif_receive_skb(skb); } @@ -1064,28 +1027,14 @@ static void be_rx_compl_process(struct be_adapter *adapter, /* Process the RX completion indicated by rxcp when GRO is enabled */ static void be_rx_compl_process_gro(struct be_adapter *adapter, struct be_rx_obj *rxo, - struct be_eth_rx_compl *rxcp) + struct be_rx_compl_info *rxcp) { struct be_rx_page_info *page_info; struct sk_buff *skb = NULL; struct be_queue_info *rxq = &rxo->q; struct be_eq_obj *eq_obj = &rxo->rx_eq; - u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len; - u16 i, rxq_idx = 0, vid, j; - u8 vtm; - u8 pkt_type; - - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); - pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); - vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); - rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); - vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); - pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp); - - /* vlanf could be wrongly set in some cards. - * ignore if vtm is not set */ - if ((adapter->function_mode & 0x400) && !vtm) - vlanf = 0; + u16 remaining, curr_frag_len; + u16 i, j; skb = napi_get_frags(&eq_obj->napi); if (!skb) { @@ -1093,9 +1042,9 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, return; } - remaining = pkt_size; - for (i = 0, j = -1; i < num_rcvd; i++) { - page_info = get_rx_page_info(adapter, rxo, rxq_idx); + remaining = rxcp->pkt_size; + for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { + page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx); curr_frag_len = min(remaining, rx_frag_size); @@ -1113,70 +1062,125 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, skb_shinfo(skb)->frags[j].size += curr_frag_len; remaining -= curr_frag_len; - index_inc(&rxq_idx, rxq->len); + index_inc(&rxcp->rxq_idx, rxq->len); memset(page_info, 0, sizeof(*page_info)); } BUG_ON(j > MAX_SKB_FRAGS); skb_shinfo(skb)->nr_frags = j + 1; - skb->len = pkt_size; - skb->data_len = pkt_size; - skb->truesize += pkt_size; + skb->len = rxcp->pkt_size; + skb->data_len = rxcp->pkt_size; + skb->truesize += rxcp->pkt_size; skb->ip_summed = CHECKSUM_UNNECESSARY; - if (likely(!vlanf)) { + if (likely(!rxcp->vlanf)) napi_gro_frags(&eq_obj->napi); - } else { - vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); - if (!lancer_chip(adapter)) - vid = swab16(vid); + else + vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid); +} + +static void be_parse_rx_compl_v1(struct be_adapter *adapter, + struct be_eth_rx_compl *compl, + struct be_rx_compl_info *rxcp) +{ + rxcp->pkt_size = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl); + rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl); + rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl); + rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl); + rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl); + rxcp->ip_csum = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl); + rxcp->l4_csum 
= + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl); + rxcp->ipv6 = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl); + rxcp->rxq_idx = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl); + rxcp->num_rcvd = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl); + rxcp->pkt_type = + AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); + rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); + rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); +} + +static void be_parse_rx_compl_v0(struct be_adapter *adapter, + struct be_eth_rx_compl *compl, + struct be_rx_compl_info *rxcp) +{ + rxcp->pkt_size = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl); + rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl); + rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl); + rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl); + rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl); + rxcp->ip_csum = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl); + rxcp->l4_csum = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl); + rxcp->ipv6 = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl); + rxcp->rxq_idx = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl); + rxcp->num_rcvd = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl); + rxcp->pkt_type = + AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); + rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); + rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); +} + +static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) +{ + struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq); + struct be_rx_compl_info *rxcp = &rxo->rxcp; + struct be_adapter *adapter = rxo->adapter; - if (!adapter->vlan_grp || adapter->vlans_added == 0) - return; + /* For checking the valid bit it is Ok to use either definition as the + * valid bit is at the same position in both v0 and v1 Rx compl */ + if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0) + return NULL; - vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid); - } + rmb(); + be_dws_le_to_cpu(compl, sizeof(*compl)); - be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type); -} + if (adapter->be3_native) + be_parse_rx_compl_v1(adapter, compl, rxcp); + else + be_parse_rx_compl_v0(adapter, compl, rxcp); -static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo) -{ - struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq); + /* vlanf could be wrongly set in some cards. 
ignore if vtm is not set */ + if ((adapter->function_mode & 0x400) && !rxcp->vtm) + rxcp->vlanf = 0; - if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0) - return NULL; + if (!lancer_chip(adapter)) + rxcp->vid = swab16(rxcp->vid); - rmb(); - be_dws_le_to_cpu(rxcp, sizeof(*rxcp)); + if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) + rxcp->vlanf = 0; + + /* As the compl has been parsed, reset it; we wont touch it again */ + compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0; queue_tail_inc(&rxo->cq); return rxcp; } -/* To reset the valid bit, we need to reset the whole word as - * when walking the queue the valid entries are little-endian - * and invalid entries are host endian - */ -static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp) +static inline struct page *be_alloc_pages(u32 size, gfp_t gfp) { - rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0; -} - -static inline struct page *be_alloc_pages(u32 size) -{ - gfp_t alloc_flags = GFP_ATOMIC; u32 order = get_order(size); + if (order > 0) - alloc_flags |= __GFP_COMP; - return alloc_pages(alloc_flags, order); + gfp |= __GFP_COMP; + return alloc_pages(gfp, order); } /* * Allocate a page, split it to fragments of size rx_frag_size and post as * receive buffers to BE */ -static void be_post_rx_frags(struct be_rx_obj *rxo) +static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl; @@ -1190,14 +1194,14 @@ static void be_post_rx_frags(struct be_rx_obj *rxo) page_info = &rxo->page_info_tbl[rxq->head]; for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) { if (!pagep) { - pagep = be_alloc_pages(adapter->big_page_size); + pagep = be_alloc_pages(adapter->big_page_size, gfp); if (unlikely(!pagep)) { rxo->stats.rx_post_fail++; break; } - page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0, - adapter->big_page_size, - PCI_DMA_FROMDEVICE); + page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep, + 0, adapter->big_page_size, + DMA_FROM_DEVICE); page_info->page_offset = 0; } else { get_page(pagep); @@ -1270,8 +1274,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index) do { cur_index = txq->tail; wrb = queue_tail_node(txq); - unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr && - skb_headlen(sent_skb))); + unmap_tx_frag(&adapter->pdev->dev, wrb, + (unmap_skb_hdr && skb_headlen(sent_skb))); unmap_skb_hdr = false; num_wrbs++; @@ -1339,13 +1343,12 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo) struct be_rx_page_info *page_info; struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rx_cq = &rxo->cq; - struct be_eth_rx_compl *rxcp; + struct be_rx_compl_info *rxcp; u16 tail; /* First cleanup pending rx completions */ while ((rxcp = be_rx_compl_get(rxo)) != NULL) { be_rx_compl_discard(adapter, rxo, rxcp); - be_rx_compl_reset(rxcp); be_cq_notify(adapter, rx_cq->id, false, 1); } @@ -1573,9 +1576,6 @@ static int be_rx_queues_create(struct be_adapter *adapter) adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; for_all_rx_queues(adapter, rxo, i) { rxo->adapter = adapter; - /* Init last_frag_index so that the frag index in the first - * completion will never match */ - rxo->last_frag_index = 0xffff; rxo->rx_eq.max_eqd = BE_MAX_EQD; rxo->rx_eq.enable_aic = true; @@ -1697,15 +1697,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev) return IRQ_HANDLED; } -static inline bool do_gro(struct 
be_rx_obj *rxo, - struct be_eth_rx_compl *rxcp, u8 err) +static inline bool do_gro(struct be_rx_compl_info *rxcp) { - int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); - - if (err) - rxo->stats.rxcp_err++; - - return (tcp_frame && !err) ? true : false; + return (rxcp->tcpf && !rxcp->err) ? true : false; } static int be_poll_rx(struct napi_struct *napi, int budget) @@ -1714,10 +1708,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget) struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq); struct be_adapter *adapter = rxo->adapter; struct be_queue_info *rx_cq = &rxo->cq; - struct be_eth_rx_compl *rxcp; + struct be_rx_compl_info *rxcp; u32 work_done; - u16 frag_index, num_rcvd; - u8 err; rxo->stats.rx_polls++; for (work_done = 0; work_done < budget; work_done++) { @@ -1725,29 +1717,19 @@ static int be_poll_rx(struct napi_struct *napi, int budget) if (!rxcp) break; - err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp); - frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, - rxcp); - num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, - rxcp); - - /* Skip out-of-buffer compl(lancer) or flush compl(BE) */ - if (likely(frag_index != rxo->last_frag_index && - num_rcvd != 0)) { - rxo->last_frag_index = frag_index; - - if (do_gro(rxo, rxcp, err)) + /* Ignore flush completions */ + if (rxcp->num_rcvd) { + if (do_gro(rxcp)) be_rx_compl_process_gro(adapter, rxo, rxcp); else be_rx_compl_process(adapter, rxo, rxcp); } - - be_rx_compl_reset(rxcp); + be_rx_stats_update(rxo, rxcp); } /* Refill the queue */ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM) - be_post_rx_frags(rxo); + be_post_rx_frags(rxo, GFP_ATOMIC); /* All consumed */ if (work_done < budget) { @@ -1827,6 +1809,7 @@ void be_detect_dump_ue(struct be_adapter *adapter) if (ue_status_lo || ue_status_hi) { adapter->ue_detected = true; + adapter->eeh_err = true; dev_err(&adapter->pdev->dev, "UE Detected!!\n"); } @@ -1865,10 +1848,14 @@ static void be_worker(struct work_struct *work) struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl); } + + if (!adapter->ue_detected && !lancer_chip(adapter)) + be_detect_dump_ue(adapter); + goto reschedule; } - if (!adapter->stats_ioctl_sent) + if (!adapter->stats_cmd_sent) be_cmd_get_stats(adapter, &adapter->stats_cmd); be_tx_rate_update(adapter); @@ -1879,7 +1866,7 @@ static void be_worker(struct work_struct *work) if (rxo->rx_post_starved) { rxo->rx_post_starved = false; - be_post_rx_frags(rxo); + be_post_rx_frags(rxo, GFP_KERNEL); } } if (!adapter->ue_detected && !lancer_chip(adapter)) @@ -2083,13 +2070,24 @@ static int be_close(struct net_device *netdev) be_async_mcc_disable(adapter); - netif_stop_queue(netdev); netif_carrier_off(netdev); adapter->link_up = false; if (!lancer_chip(adapter)) be_intr_set(adapter, false); + for_all_rx_queues(adapter, rxo, i) + napi_disable(&rxo->rx_eq.napi); + + napi_disable(&tx_eq->napi); + + if (lancer_chip(adapter)) { + be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0); + be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); + for_all_rx_queues(adapter, rxo, i) + be_cq_notify(adapter, rxo->cq.id, false, 0); + } + if (adapter->msix_enabled) { vec = be_msix_vec_get(adapter, tx_eq); synchronize_irq(vec); @@ -2103,11 +2101,6 @@ static int be_close(struct net_device *netdev) } be_irq_unregister(adapter); - for_all_rx_queues(adapter, rxo, i) - napi_disable(&rxo->rx_eq.napi); - - napi_disable(&tx_eq->napi); - /* Wait for all pending tx completions to 
arrive so that * all tx skbs are freed. */ @@ -2127,7 +2120,7 @@ static int be_open(struct net_device *netdev) u16 link_speed; for_all_rx_queues(adapter, rxo, i) { - be_post_rx_frags(rxo); + be_post_rx_frags(rxo, GFP_KERNEL); napi_enable(&rxo->rx_eq.napi); } napi_enable(&tx_eq->napi); @@ -2179,7 +2172,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) memset(mac, 0, ETH_ALEN); cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); + cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, + GFP_KERNEL); if (cmd.va == NULL) return -1; memset(cmd.va, 0, cmd.size); @@ -2190,8 +2184,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) if (status) { dev_err(&adapter->pdev->dev, "Could not enable Wake-on-lan\n"); - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, - cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, + cmd.dma); return status; } status = be_cmd_enable_magic_wol(adapter, @@ -2204,7 +2198,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable) pci_enable_wake(adapter->pdev, PCI_D3cold, 0); } - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2225,7 +2219,8 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter) for (vf = 0; vf < num_vfs; vf++) { status = be_cmd_pmac_add(adapter, mac, adapter->vf_cfg[vf].vf_if_handle, - &adapter->vf_cfg[vf].vf_pmac_id); + &adapter->vf_cfg[vf].vf_pmac_id, + vf + 1); if (status) dev_err(&adapter->pdev->dev, "Mac address add failed for VF %d\n", vf); @@ -2245,7 +2240,7 @@ static inline void be_vf_eth_addr_rem(struct be_adapter *adapter) if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID) be_cmd_pmac_del(adapter, adapter->vf_cfg[vf].vf_if_handle, - adapter->vf_cfg[vf].vf_pmac_id); + adapter->vf_cfg[vf].vf_pmac_id, vf + 1); } } @@ -2256,7 +2251,9 @@ static int be_setup(struct be_adapter *adapter) int status; u8 mac[ETH_ALEN]; - cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST; + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST; if (be_physfn(adapter)) { cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS | @@ -2277,22 +2274,26 @@ static int be_setup(struct be_adapter *adapter) goto do_none; if (be_physfn(adapter)) { - while (vf < num_vfs) { - cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED - | BE_IF_FLAGS_BROADCAST; - status = be_cmd_if_create(adapter, cap_flags, en_flags, - mac, true, + if (adapter->sriov_enabled) { + while (vf < num_vfs) { + cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | + BE_IF_FLAGS_BROADCAST; + status = be_cmd_if_create(adapter, cap_flags, + en_flags, mac, true, &adapter->vf_cfg[vf].vf_if_handle, NULL, vf+1); - if (status) { - dev_err(&adapter->pdev->dev, - "Interface Create failed for VF %d\n", vf); - goto if_destroy; + if (status) { + dev_err(&adapter->pdev->dev, + "Interface Create failed for VF %d\n", + vf); + goto if_destroy; + } + adapter->vf_cfg[vf].vf_pmac_id = + BE_INVALID_PMAC_ID; + vf++; } - adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID; - vf++; } - } else if (!be_physfn(adapter)) { + } else { status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle); if (!status) { @@ -2313,44 +2314,46 @@ static int be_setup(struct be_adapter *adapter) if (status != 0) goto rx_qs_destroy; - if (be_physfn(adapter)) { - status = be_vf_eth_addr_config(adapter); - if 
(status) - goto mcc_q_destroy; - } - adapter->link_speed = -1; return 0; -mcc_q_destroy: - if (be_physfn(adapter)) - be_vf_eth_addr_rem(adapter); be_mcc_queues_destroy(adapter); rx_qs_destroy: be_rx_queues_destroy(adapter); tx_qs_destroy: be_tx_queues_destroy(adapter); if_destroy: - for (vf = 0; vf < num_vfs; vf++) - if (adapter->vf_cfg[vf].vf_if_handle) - be_cmd_if_destroy(adapter, - adapter->vf_cfg[vf].vf_if_handle); - be_cmd_if_destroy(adapter, adapter->if_handle); + if (be_physfn(adapter) && adapter->sriov_enabled) + for (vf = 0; vf < num_vfs; vf++) + if (adapter->vf_cfg[vf].vf_if_handle) + be_cmd_if_destroy(adapter, + adapter->vf_cfg[vf].vf_if_handle, + vf + 1); + be_cmd_if_destroy(adapter, adapter->if_handle, 0); do_none: return status; } static int be_clear(struct be_adapter *adapter) { - if (be_physfn(adapter)) + int vf; + + if (be_physfn(adapter) && adapter->sriov_enabled) be_vf_eth_addr_rem(adapter); be_mcc_queues_destroy(adapter); be_rx_queues_destroy(adapter); be_tx_queues_destroy(adapter); - be_cmd_if_destroy(adapter, adapter->if_handle); + if (be_physfn(adapter) && adapter->sriov_enabled) + for (vf = 0; vf < num_vfs; vf++) + if (adapter->vf_cfg[vf].vf_if_handle) + be_cmd_if_destroy(adapter, + adapter->vf_cfg[vf].vf_if_handle, + vf + 1); + + be_cmd_if_destroy(adapter, adapter->if_handle, 0); /* tell fw we're done with firing cmds */ be_cmd_fw_clean(adapter); @@ -2453,8 +2456,8 @@ static int be_flash_data(struct be_adapter *adapter, continue; if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && (!be_flash_redboot(adapter, fw->data, - pflashcomp[i].offset, pflashcomp[i].size, - filehdr_size))) + pflashcomp[i].offset, pflashcomp[i].size, filehdr_size + + (num_of_images * sizeof(struct image_hdr))))) continue; p = fw->data; p += filehdr_size + pflashcomp[i].offset @@ -2528,8 +2531,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024; - flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size, - &flash_cmd.dma); + flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, + &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) { status = -ENOMEM; dev_err(&adapter->pdev->dev, @@ -2558,8 +2561,8 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) status = -1; } - pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va, - flash_cmd.dma); + dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, + flash_cmd.dma); if (status) { dev_err(&adapter->pdev->dev, "Firmware load error\n"); goto fw_exit; @@ -2700,13 +2703,13 @@ static void be_ctrl_cleanup(struct be_adapter *adapter) be_unmap_pci_bars(adapter); if (mem->va) - pci_free_consistent(adapter->pdev, mem->size, - mem->va, mem->dma); + dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, + mem->dma); mem = &adapter->mc_cmd_mem; if (mem->va) - pci_free_consistent(adapter->pdev, mem->size, - mem->va, mem->dma); + dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, + mem->dma); } static int be_ctrl_init(struct be_adapter *adapter) @@ -2721,8 +2724,10 @@ static int be_ctrl_init(struct be_adapter *adapter) goto done; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; - mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, - mbox_mem_alloc->size, &mbox_mem_alloc->dma); + mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev, + mbox_mem_alloc->size, + &mbox_mem_alloc->dma, + GFP_KERNEL); if (!mbox_mem_alloc->va) { status = -ENOMEM; goto 
unmap_pci_bars; @@ -2734,8 +2739,9 @@ static int be_ctrl_init(struct be_adapter *adapter) memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config); - mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size, - &mc_cmd_mem->dma); + mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev, + mc_cmd_mem->size, &mc_cmd_mem->dma, + GFP_KERNEL); if (mc_cmd_mem->va == NULL) { status = -ENOMEM; goto free_mbox; @@ -2751,8 +2757,8 @@ static int be_ctrl_init(struct be_adapter *adapter) return 0; free_mbox: - pci_free_consistent(adapter->pdev, mbox_mem_alloc->size, - mbox_mem_alloc->va, mbox_mem_alloc->dma); + dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size, + mbox_mem_alloc->va, mbox_mem_alloc->dma); unmap_pci_bars: be_unmap_pci_bars(adapter); @@ -2766,8 +2772,8 @@ static void be_stats_cleanup(struct be_adapter *adapter) struct be_dma_mem *cmd = &adapter->stats_cmd; if (cmd->va) - pci_free_consistent(adapter->pdev, cmd->size, - cmd->va, cmd->dma); + dma_free_coherent(&adapter->pdev->dev, cmd->size, + cmd->va, cmd->dma); } static int be_stats_init(struct be_adapter *adapter) @@ -2775,7 +2781,8 @@ static int be_stats_init(struct be_adapter *adapter) struct be_dma_mem *cmd = &adapter->stats_cmd; cmd->size = sizeof(struct be_cmd_req_get_stats); - cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma); + cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, + GFP_KERNEL); if (cmd->va == NULL) return -1; memset(cmd->va, 0, cmd->size); @@ -2845,6 +2852,11 @@ static int be_get_config(struct be_adapter *adapter) else adapter->max_vlans = BE_NUM_VLANS_SUPPORTED; + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + + be_cmd_check_native_mode(adapter); return 0; } @@ -2886,6 +2898,54 @@ static int be_dev_family_check(struct be_adapter *adapter) return 0; } +static int lancer_wait_ready(struct be_adapter *adapter) +{ +#define SLIPORT_READY_TIMEOUT 500 + u32 sliport_status; + int status = 0, i; + + for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + if (sliport_status & SLIPORT_STATUS_RDY_MASK) + break; + + msleep(20); + } + + if (i == SLIPORT_READY_TIMEOUT) + status = -1; + + return status; +} + +static int lancer_test_and_set_rdy_state(struct be_adapter *adapter) +{ + int status; + u32 sliport_status, err, reset_needed; + status = lancer_wait_ready(adapter); + if (!status) { + sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); + err = sliport_status & SLIPORT_STATUS_ERR_MASK; + reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK; + if (err && reset_needed) { + iowrite32(SLI_PORT_CONTROL_IP_MASK, + adapter->db + SLIPORT_CONTROL_OFFSET); + + /* check adapter has corrected the error */ + status = lancer_wait_ready(adapter); + sliport_status = ioread32(adapter->db + + SLIPORT_STATUS_OFFSET); + sliport_status &= (SLIPORT_STATUS_ERR_MASK | + SLIPORT_STATUS_RN_MASK); + if (status || sliport_status) + status = -1; + } else if (err || reset_needed) { + status = -1; + } + } + return status; +} + static int __devinit be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) { @@ -2918,11 +2978,11 @@ static int __devinit be_probe(struct pci_dev *pdev, adapter->netdev = netdev; SET_NETDEV_DEV(netdev, &pdev->dev); - status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (!status) { netdev->features |= NETIF_F_HIGHDMA; } else { - status = 
pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; @@ -2935,6 +2995,14 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status) goto free_netdev; + if (lancer_chip(adapter)) { + status = lancer_test_and_set_rdy_state(adapter); + if (status) { + dev_err(&pdev->dev, "Adapter in non recoverable error\n"); + goto free_netdev; + } + } + /* sync up with fw's ready state */ if (be_physfn(adapter)) { status = be_cmd_POST(adapter); @@ -2947,11 +3015,9 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status) goto ctrl_clean; - if (be_physfn(adapter)) { - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; - } + status = be_cmd_reset_function(adapter); + if (status) + goto ctrl_clean; status = be_stats_init(adapter); if (status) @@ -2975,10 +3041,18 @@ static int __devinit be_probe(struct pci_dev *pdev, goto unsetup; netif_carrier_off(netdev); + if (be_physfn(adapter) && adapter->sriov_enabled) { + status = be_vf_eth_addr_config(adapter); + if (status) + goto unreg_netdev; + } + dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num); schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); return 0; +unreg_netdev: + unregister_netdev(netdev); unsetup: be_clear(adapter); msix_disable: @@ -3005,6 +3079,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; + cancel_delayed_work_sync(&adapter->work); if (adapter->wol) be_setup_wol(adapter, true); @@ -3017,6 +3092,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc); be_clear(adapter); + be_msix_disable(adapter); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); @@ -3038,6 +3114,7 @@ static int be_resume(struct pci_dev *pdev) pci_set_power_state(pdev, 0); pci_restore_state(pdev); + be_msix_enable(adapter); /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) @@ -3053,6 +3130,8 @@ static int be_resume(struct pci_dev *pdev) if (adapter->wol) be_setup_wol(adapter, false); + + schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); return 0; } @@ -3064,6 +3143,9 @@ static void be_shutdown(struct pci_dev *pdev) struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; + if (netif_running(netdev)) + cancel_delayed_work_sync(&adapter->work); + netif_device_detach(netdev); be_cmd_reset_function(adapter); diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c index fad912656fe4..9f356d5d0f33 100644 --- a/drivers/net/bna/bnad.c +++ b/drivers/net/bna/bnad.c @@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad, } unmap_array[unmap_cons].skb = NULL; - pci_unmap_single(bnad->pcidev, - pci_unmap_addr(&unmap_array[unmap_cons], + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap_array[unmap_cons], dma_addr), skb_headlen(skb), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); - pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); + dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); if (++unmap_cons >= unmap_q->q_depth) break; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - pci_unmap_page(bnad->pcidev, - pci_unmap_addr(&unmap_array[unmap_cons], + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap_array[unmap_cons], 
dma_addr), skb_shinfo(skb)->frags[i].size, - PCI_DMA_TODEVICE); - pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, + DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); if (++unmap_cons >= unmap_q->q_depth) break; @@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad, sent_bytes += skb->len; wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags); - pci_unmap_single(bnad->pcidev, - pci_unmap_addr(&unmap_array[unmap_cons], + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap_array[unmap_cons], dma_addr), skb_headlen(skb), - PCI_DMA_TODEVICE); - pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); + DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); prefetch(&unmap_array[unmap_cons + 1]); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { prefetch(&unmap_array[unmap_cons + 1]); - pci_unmap_page(bnad->pcidev, - pci_unmap_addr(&unmap_array[unmap_cons], + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap_array[unmap_cons], dma_addr), skb_shinfo(skb)->frags[i].size, - PCI_DMA_TODEVICE); - pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, + DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth); } @@ -340,19 +340,22 @@ static void bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) { struct bnad_unmap_q *unmap_q; + struct bnad_skb_unmap *unmap_array; struct sk_buff *skb; int unmap_cons; unmap_q = rcb->unmap_q; + unmap_array = unmap_q->unmap_array; for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) { - skb = unmap_q->unmap_array[unmap_cons].skb; + skb = unmap_array[unmap_cons].skb; if (!skb) continue; - unmap_q->unmap_array[unmap_cons].skb = NULL; - pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> - unmap_array[unmap_cons], - dma_addr), rcb->rxq->buffer_size, - PCI_DMA_FROMDEVICE); + unmap_array[unmap_cons].skb = NULL; + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap_array[unmap_cons], + dma_addr), + rcb->rxq->buffer_size, + DMA_FROM_DEVICE); dev_kfree_skb(skb); } bnad_reset_rcb(bnad, rcb); @@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) skb->dev = bnad->netdev; skb_reserve(skb, NET_IP_ALIGN); unmap_array[unmap_prod].skb = skb; - dma_addr = pci_map_single(bnad->pcidev, skb->data, - rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE); - pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + rcb->rxq->buffer_size, + DMA_FROM_DEVICE); + dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr, dma_addr); BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); @@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) struct bna_rcb *rcb = NULL; unsigned int wi_range, packets = 0, wis = 0; struct bnad_unmap_q *unmap_q; + struct bnad_skb_unmap *unmap_array; struct sk_buff *skb; - u32 flags; + u32 flags, unmap_cons; u32 qid0 = ccb->rcb[0]->rxq->rxq_id; struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; @@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) rcb = ccb->rcb[1]; unmap_q = rcb->unmap_q; + unmap_array = unmap_q->unmap_array; + unmap_cons = unmap_q->consumer_index; - skb = unmap_q->unmap_array[unmap_q->consumer_index].skb; + skb = unmap_array[unmap_cons].skb; BUG_ON(!(skb)); - unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL; - 
pci_unmap_single(bnad->pcidev, - pci_unmap_addr(&unmap_q-> - unmap_array[unmap_q-> - consumer_index], + unmap_array[unmap_cons].skb = NULL; + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap_array[unmap_cons], dma_addr), - rcb->rxq->buffer_size, - PCI_DMA_FROMDEVICE); + rcb->rxq->buffer_size, + DMA_FROM_DEVICE); BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth); /* Should be more efficient ? Performance ? */ @@ -1015,9 +1020,9 @@ bnad_mem_free(struct bnad *bnad, if (mem_info->mem_type == BNA_MEM_T_DMA) { BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), dma_pa); - pci_free_consistent(bnad->pcidev, - mem_info->mdl[i].len, - mem_info->mdl[i].kva, dma_pa); + dma_free_coherent(&bnad->pcidev->dev, + mem_info->mdl[i].len, + mem_info->mdl[i].kva, dma_pa); } else kfree(mem_info->mdl[i].kva); } @@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad, for (i = 0; i < mem_info->num; i++) { mem_info->mdl[i].len = mem_info->len; mem_info->mdl[i].kva = - pci_alloc_consistent(bnad->pcidev, - mem_info->len, &dma_pa); + dma_alloc_coherent(&bnad->pcidev->dev, + mem_info->len, &dma_pa, + GFP_KERNEL); if (mem_info->mdl[i].kva == NULL) goto err_return; @@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) unmap_q->unmap_array[unmap_prod].skb = skb; BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR)); txqent->vector[vect_id].length = htons(skb_headlen(skb)); - dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); - pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, dma_addr); BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); @@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); txqent->vector[vect_id].length = htons(size); - dma_addr = - pci_map_page(bnad->pcidev, frag->page, - frag->page_offset, size, - PCI_DMA_TODEVICE); - pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, + dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page, + frag->page_offset, size, DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, dma_addr); BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); @@ -3022,14 +3026,14 @@ bnad_pci_init(struct bnad *bnad, err = pci_request_regions(pdev, BNAD_NAME); if (err) goto disable_device; - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && + !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { *using_dac = 1; } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); if (err) goto release_regions; } diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h index 8b1d51557def..a89117fa4970 100644 --- a/drivers/net/bna/bnad.h +++ b/drivers/net/bna/bnad.h @@ -181,7 +181,7 @@ struct bnad_rx_info { /* Unmap queues for Tx / Rx cleanup */ struct bnad_skb_unmap { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(dma_addr) + DEFINE_DMA_UNMAP_ADDR(dma_addr); }; struct bnad_unmap_q { diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 
0ba59d5aeb7f..d1865cc97313 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -1,6 +1,6 @@ /* bnx2.c: Broadcom NX2 network driver. * - * Copyright (c) 2004-2010 Broadcom Corporation + * Copyright (c) 2004-2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -56,11 +56,11 @@ #include "bnx2_fw.h" #define DRV_MODULE_NAME "bnx2" -#define DRV_MODULE_VERSION "2.0.21" -#define DRV_MODULE_RELDATE "Dec 23, 2010" +#define DRV_MODULE_VERSION "2.1.6" +#define DRV_MODULE_RELDATE "Mar 7, 2011" #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw" #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" -#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1.fw" +#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw" #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw" #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw" @@ -435,7 +435,8 @@ bnx2_cnic_stop(struct bnx2 *bp) struct cnic_ctl_info info; mutex_lock(&bp->cnic_lock); - c_ops = bp->cnic_ops; + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_lock)); if (c_ops) { info.cmd = CNIC_CTL_STOP_CMD; c_ops->cnic_ctl(bp->cnic_data, &info); @@ -450,7 +451,8 @@ bnx2_cnic_start(struct bnx2 *bp) struct cnic_ctl_info info; mutex_lock(&bp->cnic_lock); - c_ops = bp->cnic_ops; + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_lock)); if (c_ops) { if (!(bp->flags & BNX2_FLAG_USING_MSIX)) { struct bnx2_napi *bnapi = &bp->bnx2_napi[0]; @@ -8315,7 +8317,7 @@ static const struct net_device_ops bnx2_netdev_ops = { #endif }; -static void inline vlan_features_add(struct net_device *dev, unsigned long flags) +static void inline vlan_features_add(struct net_device *dev, u32 flags) { dev->vlan_features |= flags; } diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index f459fb2f9add..68020451dc4f 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -1,6 +1,6 @@ /* bnx2.h: Broadcom NX2 network driver. 
* - * Copyright (c) 2004-2009 Broadcom Corporation + * Copyright (c) 2004-2011 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -6207,6 +6207,8 @@ struct l2_fhdr { #define BNX2_CP_SCRATCH 0x001a0000 +#define BNX2_FW_MAX_ISCSI_CONN 0x001a0080 + /* * mcp_reg definition @@ -6759,7 +6761,7 @@ struct bnx2 { u32 tx_wake_thresh; #ifdef BCM_CNIC - struct cnic_ops *cnic_ops; + struct cnic_ops __rcu *cnic_ops; void *cnic_data; #endif diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 8849699c66c4..b7ff87b35fbb 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -22,8 +22,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.62.00-6" -#define DRV_MODULE_RELDATE "2011/01/30" +#define DRV_MODULE_VERSION "1.62.11-0" +#define DRV_MODULE_RELDATE "2011/01/31" #define BNX2X_BC_VER 0x040200 #define BNX2X_MULTI_QUEUE @@ -31,7 +31,7 @@ #define BNX2X_NEW_NAPI #if defined(CONFIG_DCB) -#define BCM_DCB +#define BCM_DCBNL #endif #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) #define BCM_CNIC 1 @@ -129,6 +129,7 @@ void bnx2x_panic_dump(struct bnx2x *bp); #endif #define bnx2x_mc_addr(ha) ((ha)->addr) +#define bnx2x_uc_addr(ha) ((ha)->addr) #define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff) #define U64_HI(x) (u32)(((u64)(x)) >> 32) @@ -341,6 +342,8 @@ struct bnx2x_fastpath { /* chip independed shortcut into rx_prods_offset memory */ u32 ustorm_rx_prods_offset; + u32 rx_buf_size; + dma_addr_t status_blk_mapping; struct sw_tx_bd *tx_buf_ring; @@ -428,6 +431,10 @@ struct bnx2x_fastpath { }; #define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) + +/* Use 2500 as a mini-jumbo MTU for FCoE */ +#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 + #ifdef BCM_CNIC /* FCoE L2 `fastpath' is right after the eth entries */ #define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) @@ -810,6 +817,7 @@ struct bnx2x_slowpath { struct eth_stats_query fw_stats; struct mac_configuration_cmd mac_config; struct mac_configuration_cmd mcast_config; + struct mac_configuration_cmd uc_mac_config; struct client_init_ramrod_data client_init_data; /* used by dmae command executer */ @@ -911,7 +919,6 @@ struct bnx2x { int tx_ring_size; u32 rx_csum; - u32 rx_buf_size; /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ #define ETH_OVREHEAD (ETH_HLEN + 8 + 8) #define ETH_MIN_PACKET_SIZE 60 @@ -939,7 +946,7 @@ struct bnx2x { struct eth_spe *spq_prod_bd; struct eth_spe *spq_last_bd; __le16 *dsb_sp_prod; - atomic_t spq_left; /* serialize spq */ + atomic_t cq_spq_left; /* ETH_XXX ramrods credit */ /* used to synchronize spq accesses */ spinlock_t spq_lock; @@ -949,6 +956,7 @@ struct bnx2x { u16 eq_prod; u16 eq_cons; __le16 *eq_cons_sb; + atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */ /* Flags for marking that there is a STAT_QUERY or SET_MAC ramrod pending */ @@ -976,8 +984,12 @@ struct bnx2x { #define MF_FUNC_DIS 0x1000 #define FCOE_MACS_SET 0x2000 #define NO_FCOE_FLAG 0x4000 +#define NO_ISCSI_OOO_FLAG 0x8000 +#define NO_ISCSI_FLAG 0x10000 #define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG) +#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) +#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) int pf_num; /* absolute PF number */ int pfid; /* per-path PF number */ @@ -1064,6 +1076,7 @@ struct bnx2x { int num_queues; int disable_tpa; int int_mode; + u32 *rx_indir_table; struct tstorm_eth_mac_filter_config mac_filters; #define BNX2X_ACCEPT_NONE 
0x0000 @@ -1110,7 +1123,7 @@ struct bnx2x { #define BNX2X_CNIC_FLAG_MAC_SET 1 void *t2; dma_addr_t t2_mapping; - struct cnic_ops *cnic_ops; + struct cnic_ops __rcu *cnic_ops; void *cnic_data; u32 cnic_tag; struct cnic_eth_dev cnic_eth_dev; @@ -1125,13 +1138,12 @@ struct bnx2x { u16 cnic_kwq_pending; u16 cnic_spq_pending; struct mutex cnic_mutex; - u8 iscsi_mac[ETH_ALEN]; u8 fip_mac[ETH_ALEN]; #endif int dmae_ready; /* used to synchronize dmae accesses */ - struct mutex dmae_mutex; + spinlock_t dmae_lock; /* used to protect the FW mail box */ struct mutex fw_mb_mutex; @@ -1448,6 +1460,12 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param); void bnx2x_calc_fc_adv(struct bnx2x *bp); int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, u32 data_hi, u32 data_lo, int common); + +/* Clears multicast and unicast list configuration in the chip. */ +void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp); +void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp); +void bnx2x_invalidate_uc_list(struct bnx2x *bp); + void bnx2x_update_coalesce(struct bnx2x *bp); int bnx2x_get_link_cfg_idx(struct bnx2x *bp); @@ -1787,5 +1805,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */ extern void bnx2x_set_ethtool_ops(struct net_device *netdev); +void bnx2x_push_indir_table(struct bnx2x *bp); #endif /* bnx2x.h */ diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index a71b32940533..e83ac6dd6fc0 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -232,7 +232,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, /* move empty skb from pool to prod and map it */ prod_rx_buf->skb = fp->tpa_pool[queue].skb; mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data, - bp->rx_buf_size, DMA_FROM_DEVICE); + fp->rx_buf_size, DMA_FROM_DEVICE); dma_unmap_addr_set(prod_rx_buf, mapping, mapping); /* move partial skb from cons to pool (don't unmap yet) */ @@ -367,13 +367,13 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue]; struct sk_buff *skb = rx_buf->skb; /* alloc new skb */ - struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); /* Unmap skb in the pool anyway, as we are going to change pool entry status to BNX2X_TPA_STOP even if new skb allocation fails. */ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, DMA_FROM_DEVICE); + fp->rx_buf_size, DMA_FROM_DEVICE); if (likely(new_skb)) { /* fix ip xsum and give it to the stack */ @@ -385,10 +385,10 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, prefetch(((char *)(skb)) + L1_CACHE_BYTES); #ifdef BNX2X_STOP_ON_ERROR - if (pad + len > bp->rx_buf_size) { + if (pad + len > fp->rx_buf_size) { BNX2X_ERR("skb_put is about to fail... 
" "pad %d len %d rx_buf_size %d\n", - pad, len, bp->rx_buf_size); + pad, len, fp->rx_buf_size); bnx2x_panic(); return; } @@ -618,7 +618,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, + fp->rx_buf_size, DMA_FROM_DEVICE); skb_reserve(skb, pad); skb_put(skb, len); @@ -858,19 +858,16 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) u16 ring_prod; int i, j; - bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN + - IP_HEADER_ALIGNMENT_PADDING; - - DP(NETIF_MSG_IFUP, - "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); - for_each_rx_queue(bp, j) { struct bnx2x_fastpath *fp = &bp->fp[j]; + DP(NETIF_MSG_IFUP, + "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); + if (!fp->disable_tpa) { for (i = 0; i < max_agg_queues; i++) { fp->tpa_pool[i].skb = - netdev_alloc_skb(bp->dev, bp->rx_buf_size); + netdev_alloc_skb(bp->dev, fp->rx_buf_size); if (!fp->tpa_pool[i].skb) { BNX2X_ERR("Failed to allocate TPA " "skb pool for queue[%d] - " @@ -978,7 +975,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, DMA_FROM_DEVICE); + fp->rx_buf_size, DMA_FROM_DEVICE); rx_buf->skb = NULL; dev_kfree_skb(skb); @@ -1303,6 +1300,31 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) return rc; } +static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) +{ + int i; + + for_each_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + + /* Always use a mini-jumbo MTU for the FCoE L2 ring */ + if (IS_FCOE_IDX(i)) + /* + * Although there are no IP frames expected to arrive to + * this ring we still want to add an + * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer + * overrun attack. + */ + fp->rx_buf_size = + BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD + + BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING; + else + fp->rx_buf_size = + bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN + + IP_HEADER_ALIGNMENT_PADDING; + } +} + /* must be called with rtnl_lock */ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) { @@ -1326,6 +1348,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* must be called before memory allocation and HW init */ bnx2x_ilt_set_info(bp); + /* Set the receive queues buffer size */ + bnx2x_set_rx_buf_size(bp); + if (bnx2x_alloc_mem(bp)) return -ENOMEM; @@ -1481,6 +1506,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_set_eth_mac(bp, 1); + /* Clear MC configuration */ + if (CHIP_IS_E1(bp)) + bnx2x_invalidate_e1_mc_list(bp); + else + bnx2x_invalidate_e1h_mc_list(bp); + + /* Clear UC lists configuration */ + bnx2x_invalidate_uc_list(bp); + if (bp->pending_max) { bnx2x_update_max_mf_config(bp, bp->pending_max); bp->pending_max = 0; @@ -1489,25 +1523,23 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) if (bp->port.pmf) bnx2x_initial_phy_init(bp, load_mode); + /* Initialize Rx filtering */ + bnx2x_set_rx_mode(bp->dev); + /* Start fast path */ switch (load_mode) { case LOAD_NORMAL: /* Tx queue should be only reenabled */ netif_tx_wake_all_queues(bp->dev); /* Initialize the receive filter. */ - bnx2x_set_rx_mode(bp->dev); break; case LOAD_OPEN: netif_tx_start_all_queues(bp->dev); smp_mb__after_clear_bit(); - /* Initialize the receive filter. */ - bnx2x_set_rx_mode(bp->dev); break; case LOAD_DIAG: - /* Initialize the receive filter. 
*/ - bnx2x_set_rx_mode(bp->dev); bp->state = BNX2X_STATE_DIAG; break; diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 85ea7f26b51f..ef37b98d6146 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h @@ -831,11 +831,11 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp, struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index]; dma_addr_t mapping; - skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size); if (unlikely(skb == NULL)) return -ENOMEM; - mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size, + mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { dev_kfree_skb(skb); @@ -901,7 +901,7 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp, if (fp->tpa_state[i] == BNX2X_TPA_START) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_size, DMA_FROM_DEVICE); + fp->rx_buf_size, DMA_FROM_DEVICE); dev_kfree_skb(skb); rx_buf->skb = NULL; diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index fb60021f81fb..9a24d79c71d9 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c @@ -19,6 +19,9 @@ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/errno.h> +#ifdef BCM_DCBNL +#include <linux/dcbnl.h> +#endif #include "bnx2x.h" #include "bnx2x_cmn.h" @@ -508,13 +511,75 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp) return 0; } + +#ifdef BCM_DCBNL +static inline +u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent) +{ + u8 pri; + + /* Choose the highest priority */ + for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--) + if (ent->pri_bitmap & (1 << pri)) + break; + return pri; +} + +static inline +u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent) +{ + return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) == + DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM : + DCB_APP_IDTYPE_ETHTYPE; +} + +static inline +void bnx2x_dcbx_invalidate_local_apps(struct bnx2x *bp) +{ + int i; + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) + bp->dcbx_local_feat.app.app_pri_tbl[i].appBitfield &= + ~DCBX_APP_ENTRY_VALID; +} + +int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall) +{ + int i, err = 0; + + for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) { + struct dcbx_app_priority_entry *ent = + &bp->dcbx_local_feat.app.app_pri_tbl[i]; + + if (ent->appBitfield & DCBX_APP_ENTRY_VALID) { + u8 up = bnx2x_dcbx_dcbnl_app_up(ent); + + /* avoid invalid user-priority */ + if (up) { + struct dcb_app app; + app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent); + app.protocol = ent->app_id; + app.priority = delall ? 
0 : up; + err = dcb_setapp(bp->dev, &app); + } + } + } + return err; +} +#endif + void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) { switch (state) { case BNX2X_DCBX_STATE_NEG_RECEIVED: { DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n"); - +#ifdef BCM_DCBNL + /** + * Delete app tlvs from dcbnl before reading new + * negotiation results + */ + bnx2x_dcbnl_update_applist(bp, true); +#endif /* Read neg results if dcbx is in the FW */ if (bnx2x_dcbx_read_shmem_neg_results(bp)) return; @@ -526,10 +591,24 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) bp->dcbx_error); if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) { +#ifdef BCM_DCBNL + /** + * Add new app tlvs to dcbnl + */ + bnx2x_dcbnl_update_applist(bp, false); +#endif bnx2x_dcbx_stop_hw_tx(bp); return; } /* fall through */ +#ifdef BCM_DCBNL + /** + * Invalidate the local app tlvs if they are not added + * to the dcbnl app list to avoid deleting them from + * the list later on + */ + bnx2x_dcbx_invalidate_local_apps(bp); +#endif } case BNX2X_DCBX_STATE_TX_PAUSED: DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n"); @@ -1505,8 +1584,7 @@ static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp) bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg); } /* DCB netlink */ -#ifdef BCM_DCB -#include <linux/dcbnl.h> +#ifdef BCM_DCBNL #define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \ DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC) @@ -1816,32 +1894,6 @@ static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state) bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0); } -static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent, - u8 idtype, u16 idval) -{ - if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID)) - return false; - - switch (idtype) { - case DCB_APP_IDTYPE_ETHTYPE: - if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) != - DCBX_APP_SF_ETH_TYPE) - return false; - break; - case DCB_APP_IDTYPE_PORTNUM: - if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) != - DCBX_APP_SF_PORT) - return false; - break; - default: - return false; - } - if (app_ent->app_id != idval) - return false; - - return true; -} - static void bnx2x_admin_app_set_ent( struct bnx2x_admin_priority_app_table *app_ent, u8 idtype, u16 idval, u8 up) @@ -1943,30 +1995,6 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype, return bnx2x_set_admin_app_up(bp, idtype, idval, up); } -static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype, - u16 idval) -{ - int i; - u8 up = 0; - - struct bnx2x *bp = netdev_priv(netdev); - DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval); - - /* iterate over the app entries looking for idtype and idval */ - for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) - if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i], - idtype, idval)) - break; - - if (i < DCBX_MAX_APP_PROTOCOL) - /* if found return up */ - up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap; - else - DP(NETIF_MSG_LINK, "app not found\n"); - - return up; -} - static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev) { struct bnx2x *bp = netdev_priv(netdev); @@ -2107,7 +2135,6 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = { .setnumtcs = bnx2x_dcbnl_set_numtcs, .getpfcstate = bnx2x_dcbnl_get_pfc_state, .setpfcstate = bnx2x_dcbnl_set_pfc_state, - .getapp = bnx2x_dcbnl_get_app_up, .setapp = bnx2x_dcbnl_set_app_up, .getdcbx = bnx2x_dcbnl_get_dcbx, .setdcbx = bnx2x_dcbnl_set_dcbx, @@ -2115,4 +2142,4 @@ const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = { .setfeatcfg = bnx2x_dcbnl_set_featcfg, }; -#endif /* 
BCM_DCB */ +#endif /* BCM_DCBNL */ diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h index f650f98e4092..71b8eda43bd0 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.h +++ b/drivers/net/bnx2x/bnx2x_dcb.h @@ -189,8 +189,9 @@ enum { void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state); /* DCB netlink */ -#ifdef BCM_DCB +#ifdef BCM_DCBNL extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops; -#endif /* BCM_DCB */ +int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall); +#endif /* BCM_DCBNL */ #endif /* BNX2X_DCB_H */ diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 7e92f9d0dcfd..f5050155c6b5 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c @@ -1617,7 +1617,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up) /* prepare the loopback packet */ pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ? bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN); - skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size); if (!skb) { rc = -ENOMEM; goto test_loopback_exit; @@ -2131,6 +2131,59 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data) return 0; } +static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + void *rules __always_unused) +{ + struct bnx2x *bp = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = BNX2X_NUM_ETH_QUEUES(bp); + return 0; + + default: + return -EOPNOTSUPP; + } +} + +static int bnx2x_get_rxfh_indir(struct net_device *dev, + struct ethtool_rxfh_indir *indir) +{ + struct bnx2x *bp = netdev_priv(dev); + size_t copy_size = + min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE); + + if (bp->multi_mode == ETH_RSS_MODE_DISABLED) + return -EOPNOTSUPP; + + indir->size = TSTORM_INDIRECTION_TABLE_SIZE; + memcpy(indir->ring_index, bp->rx_indir_table, + copy_size * sizeof(bp->rx_indir_table[0])); + return 0; +} + +static int bnx2x_set_rxfh_indir(struct net_device *dev, + const struct ethtool_rxfh_indir *indir) +{ + struct bnx2x *bp = netdev_priv(dev); + size_t i; + + if (bp->multi_mode == ETH_RSS_MODE_DISABLED) + return -EOPNOTSUPP; + + /* Validate size and indices */ + if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE) + return -EINVAL; + for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) + if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp)) + return -EINVAL; + + memcpy(bp->rx_indir_table, indir->ring_index, + indir->size * sizeof(bp->rx_indir_table[0])); + bnx2x_push_indir_table(bp); + return 0; +} + static const struct ethtool_ops bnx2x_ethtool_ops = { .get_settings = bnx2x_get_settings, .set_settings = bnx2x_set_settings, @@ -2167,6 +2220,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .get_strings = bnx2x_get_strings, .phys_id = bnx2x_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, + .get_rxnfc = bnx2x_get_rxnfc, + .get_rxfh_indir = bnx2x_get_rxfh_indir, + .set_rxfh_indir = bnx2x_set_rxfh_indir, }; void bnx2x_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index 548f5631c0dc..be503cc0a50b 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h @@ -11,20 +11,27 @@ #include "bnx2x_fw_defs.h" +#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e + struct license_key { u32 reserved[6]; -#if defined(__BIG_ENDIAN) - u16 max_iscsi_init_conn; - u16 max_iscsi_trgt_conn; -#elif defined(__LITTLE_ENDIAN) - u16 max_iscsi_trgt_conn; - u16 max_iscsi_init_conn; 
-#endif + u32 max_iscsi_conn; +#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK 0xFFFF +#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT 0 +#define BNX2X_MAX_ISCSI_INIT_CONN_MASK 0xFFFF0000 +#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT 16 - u32 reserved_a[6]; -}; + u32 reserved_a; + + u32 max_fcoe_conn; +#define BNX2X_MAX_FCOE_TRGT_CONN_MASK 0xFFFF +#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT 0 +#define BNX2X_MAX_FCOE_INIT_CONN_MASK 0xFFFF0000 +#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT 16 + u32 reserved_b[4]; +}; #define PORT_0 0 #define PORT_1 1 @@ -237,8 +244,26 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT 16 - u32 Reserved0[16]; /* 0x158 */ - + u32 Reserved0[3]; /* 0x158 */ + /* Controls the TX laser of the SFP+ module */ + u32 sfp_ctrl; /* 0x164 */ +#define PORT_HW_CFG_TX_LASER_MASK 0x000000FF +#define PORT_HW_CFG_TX_LASER_SHIFT 0 +#define PORT_HW_CFG_TX_LASER_MDIO 0x00000000 +#define PORT_HW_CFG_TX_LASER_GPIO0 0x00000001 +#define PORT_HW_CFG_TX_LASER_GPIO1 0x00000002 +#define PORT_HW_CFG_TX_LASER_GPIO2 0x00000003 +#define PORT_HW_CFG_TX_LASER_GPIO3 0x00000004 + + /* Controls the fault module LED of the SFP+ */ +#define PORT_HW_CFG_FAULT_MODULE_LED_MASK 0x0000FF00 +#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT 8 +#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0 0x00000000 +#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1 0x00000100 +#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2 0x00000200 +#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3 0x00000300 +#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED 0x00000400 + u32 Reserved01[12]; /* 0x158 */ /* for external PHY, or forced mode or during AN */ u16 xgxs_config_rx[4]; /* 0x198 */ @@ -246,12 +271,78 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ u32 Reserved1[56]; /* 0x1A8 */ u32 default_cfg; /* 0x288 */ +#define PORT_HW_CFG_GPIO0_CONFIG_MASK 0x00000003 +#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT 0 +#define PORT_HW_CFG_GPIO0_CONFIG_NA 0x00000000 +#define PORT_HW_CFG_GPIO0_CONFIG_LOW 0x00000001 +#define PORT_HW_CFG_GPIO0_CONFIG_HIGH 0x00000002 +#define PORT_HW_CFG_GPIO0_CONFIG_INPUT 0x00000003 + +#define PORT_HW_CFG_GPIO1_CONFIG_MASK 0x0000000C +#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT 2 +#define PORT_HW_CFG_GPIO1_CONFIG_NA 0x00000000 +#define PORT_HW_CFG_GPIO1_CONFIG_LOW 0x00000004 +#define PORT_HW_CFG_GPIO1_CONFIG_HIGH 0x00000008 +#define PORT_HW_CFG_GPIO1_CONFIG_INPUT 0x0000000c + +#define PORT_HW_CFG_GPIO2_CONFIG_MASK 0x00000030 +#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT 4 +#define PORT_HW_CFG_GPIO2_CONFIG_NA 0x00000000 +#define PORT_HW_CFG_GPIO2_CONFIG_LOW 0x00000010 +#define PORT_HW_CFG_GPIO2_CONFIG_HIGH 0x00000020 +#define PORT_HW_CFG_GPIO2_CONFIG_INPUT 0x00000030 + +#define PORT_HW_CFG_GPIO3_CONFIG_MASK 0x000000C0 +#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT 6 +#define PORT_HW_CFG_GPIO3_CONFIG_NA 0x00000000 +#define PORT_HW_CFG_GPIO3_CONFIG_LOW 0x00000040 +#define PORT_HW_CFG_GPIO3_CONFIG_HIGH 0x00000080 +#define PORT_HW_CFG_GPIO3_CONFIG_INPUT 0x000000c0 + + /* + * When KR link is required to be set to force which is not + * KR-compliant, this parameter determine what is the trigger for it. + * When GPIO is selected, low input will force the speed. Currently + * default speed is 1G. In the future, it may be widen to select the + * forced speed in with another parameter. Note when force-1G is + * enabled, it override option 56: Link Speed option. 
+ */ +#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK 0x00000F00 +#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT 8 +#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED 0x00000000 +#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0 0x00000100 +#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0 0x00000200 +#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0 0x00000300 +#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0 0x00000400 +#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1 0x00000500 +#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1 0x00000600 +#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1 0x00000700 +#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1 0x00000800 +#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED 0x00000900 + /* Enable to determine with which GPIO to reset the external phy */ +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK 0x000F0000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT 16 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE 0x00000000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0 0x00010000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0 0x00020000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0 0x00030000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0 0x00040000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1 0x00050000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1 0x00060000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1 0x00070000 +#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1 0x00080000 /* Enable BAM on KR */ #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 + /* Enable Common Mode Sense */ +#define PORT_HW_CFG_ENABLE_CMS_MASK 0x00200000 +#define PORT_HW_CFG_ENABLE_CMS_SHIFT 21 +#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 +#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 + u32 speed_capability_mask2; /* 0x28C */ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 @@ -381,6 +472,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00 +#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833 0x00000d00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index dd1210fddfff..f2f367d4e74d 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -1,4 +1,4 @@ -/* Copyright 2008-2009 Broadcom Corporation +/* Copyright 2008-2011 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -28,12 +28,13 @@ /********************************************************/ #define ETH_HLEN 14 -#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */ +/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ +#define ETH_OVREHEAD (ETH_HLEN + 8 + 8) #define ETH_MIN_PACKET_SIZE 60 #define ETH_MAX_PACKET_SIZE 1500 #define ETH_MAX_JUMBO_PACKET_SIZE 9600 #define MDIO_ACCESS_TIMEOUT 1000 -#define BMAC_CONTROL_RX_ENABLE 2 +#define BMAC_CONTROL_RX_ENABLE 2 /***********************************************************/ /* Shortcut definitions */ @@ -79,7 +80,7 @@ #define AUTONEG_CL37 SHARED_HW_CFG_AN_ENABLE_CL37 #define AUTONEG_CL73 SHARED_HW_CFG_AN_ENABLE_CL73 -#define 
AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM +#define AUTONEG_BAM SHARED_HW_CFG_AN_ENABLE_BAM #define AUTONEG_PARALLEL \ SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION #define AUTONEG_SGMII_FIBER_AUTODET \ @@ -112,10 +113,10 @@ #define GP_STATUS_10G_KX4 \ MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4 -#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD -#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD +#define LINK_10THD LINK_STATUS_SPEED_AND_DUPLEX_10THD +#define LINK_10TFD LINK_STATUS_SPEED_AND_DUPLEX_10TFD #define LINK_100TXHD LINK_STATUS_SPEED_AND_DUPLEX_100TXHD -#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 +#define LINK_100T4 LINK_STATUS_SPEED_AND_DUPLEX_100T4 #define LINK_100TXFD LINK_STATUS_SPEED_AND_DUPLEX_100TXFD #define LINK_1000THD LINK_STATUS_SPEED_AND_DUPLEX_1000THD #define LINK_1000TFD LINK_STATUS_SPEED_AND_DUPLEX_1000TFD @@ -123,18 +124,18 @@ #define LINK_2500THD LINK_STATUS_SPEED_AND_DUPLEX_2500THD #define LINK_2500TFD LINK_STATUS_SPEED_AND_DUPLEX_2500TFD #define LINK_2500XFD LINK_STATUS_SPEED_AND_DUPLEX_2500XFD -#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD -#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD -#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD -#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD +#define LINK_10GTFD LINK_STATUS_SPEED_AND_DUPLEX_10GTFD +#define LINK_10GXFD LINK_STATUS_SPEED_AND_DUPLEX_10GXFD +#define LINK_12GTFD LINK_STATUS_SPEED_AND_DUPLEX_12GTFD +#define LINK_12GXFD LINK_STATUS_SPEED_AND_DUPLEX_12GXFD #define LINK_12_5GTFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD #define LINK_12_5GXFD LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD -#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD -#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD -#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD -#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD -#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD -#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD +#define LINK_13GTFD LINK_STATUS_SPEED_AND_DUPLEX_13GTFD +#define LINK_13GXFD LINK_STATUS_SPEED_AND_DUPLEX_13GXFD +#define LINK_15GTFD LINK_STATUS_SPEED_AND_DUPLEX_15GTFD +#define LINK_15GXFD LINK_STATUS_SPEED_AND_DUPLEX_15GXFD +#define LINK_16GTFD LINK_STATUS_SPEED_AND_DUPLEX_16GTFD +#define LINK_16GXFD LINK_STATUS_SPEED_AND_DUPLEX_16GXFD #define PHY_XGXS_FLAG 0x1 #define PHY_SGMII_FLAG 0x2 @@ -142,7 +143,7 @@ /* */ #define SFP_EEPROM_CON_TYPE_ADDR 0x2 - #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 + #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 @@ -153,15 +154,15 @@ #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 - #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 + #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE 0x8 -#define SFP_EEPROM_OPTIONS_ADDR 0x40 +#define SFP_EEPROM_OPTIONS_ADDR 0x40 #define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1 -#define SFP_EEPROM_OPTIONS_SIZE 2 +#define SFP_EEPROM_OPTIONS_SIZE 2 -#define EDC_MODE_LINEAR 0x0022 -#define EDC_MODE_LIMITING 0x0044 -#define EDC_MODE_PASSIVE_DAC 0x0055 +#define EDC_MODE_LINEAR 0x0022 +#define EDC_MODE_LIMITING 0x0044 +#define EDC_MODE_PASSIVE_DAC 0x0055 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000) @@ -170,24 +171,18 @@ /* INTERFACE */ /**********************************************************/ -#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ +#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ bnx2x_cl45_write(_bp, _phy, \ (_phy)->def_md_devad, \ (_bank + (_addr & 0xf)), \ _val) 
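
The renamed shortcut macros above wrap the clause-45 MDIO helpers so that the XGXS "banked" registers can be addressed as a bank base plus the low four bits of the CL22-style register offset. Below is a minimal standalone sketch of that address composition only; mdio_write45() is a hypothetical stand-in for bnx2x_cl45_write(), and the devad/bank/register numbers are illustrative placeholders rather than the real DEFAULT_PHY_DEV_ADDR or MDIO_REG_BANK_* values.

/* Sketch: how CL22_WR_OVER_CL45() composes its clause-45 address,
 * i.e. (_bank + (_addr & 0xf)).  mdio_write45() is a hypothetical
 * stand-in for bnx2x_cl45_write(); all numeric values are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

static void mdio_write45(uint8_t devad, uint16_t reg, uint16_t val)
{
	/* real driver: bnx2x_cl45_write(bp, phy, devad, reg, val) */
	printf("devad %u, reg 0x%04x, val 0x%04x\n",
	       (unsigned)devad, (unsigned)reg, (unsigned)val);
}

static void cl22_wr_over_cl45(uint8_t def_md_devad, uint16_t bank,
			      uint16_t addr, uint16_t val)
{
	/* same address math as the macro above */
	mdio_write45(def_md_devad, bank + (addr & 0xf), val);
}

int main(void)
{
	/* placeholder devad/bank/offset values, for illustration only */
	cl22_wr_over_cl45(3, 0xffe0, 0x10, 0x1140);
	return 0;
}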
-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \ +#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \ bnx2x_cl45_read(_bp, _phy, \ (_phy)->def_md_devad, \ (_bank + (_addr & 0xf)), \ _val) -static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 *ret_val); - -static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 val); - static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits) { u32 val = REG_RD(bp, reg); @@ -216,7 +211,7 @@ void bnx2x_ets_disabled(struct link_params *params) DP(NETIF_MSG_LINK, "ETS disabled configuration\n"); - /** + /* * mapping between entry priority to client number (0,1,2 -debug and * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) * 3bits client num. @@ -225,7 +220,7 @@ void bnx2x_ets_disabled(struct link_params *params) */ REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); - /** + /* * Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, 3 - * COS0 entry, 4 - COS1 entry. @@ -237,12 +232,12 @@ void bnx2x_ets_disabled(struct link_params *params) REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); /* defines which entries (clients) are subjected to WFQ arbitration */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); - /** - * For strict priority entries defines the number of consecutive - * slots for the highest priority. - */ + /* + * For strict priority entries defines the number of consecutive + * slots for the highest priority. + */ REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); - /** + /* * mapping between the CREDIT_WEIGHT registers and actual client * numbers */ @@ -255,7 +250,7 @@ void bnx2x_ets_disabled(struct link_params *params) REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); /* ETS mode disable */ REG_WR(bp, PBF_REG_ETS_ENABLED, 0); - /** + /* * If ETS mode is enabled (there is no strict priority) defines a WFQ * weight for COS0/COS1. */ @@ -268,24 +263,24 @@ void bnx2x_ets_disabled(struct link_params *params) REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); } -void bnx2x_ets_bw_limit_common(const struct link_params *params) +static void bnx2x_ets_bw_limit_common(const struct link_params *params) { /* ETS disabled configuration */ struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); - /** - * defines which entries (clients) are subjected to WFQ arbitration - * COS0 0x8 - * COS1 0x10 - */ + /* + * defines which entries (clients) are subjected to WFQ arbitration + * COS0 0x8 + * COS1 0x10 + */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); - /** - * mapping between the ARB_CREDIT_WEIGHT registers and actual - * client numbers (WEIGHT_0 does not actually have to represent - * client 0) - * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 - * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 - */ + /* + * mapping between the ARB_CREDIT_WEIGHT registers and actual + * client numbers (WEIGHT_0 does not actually have to represent + * client 0) + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010 + */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A); REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, @@ -298,14 +293,14 @@ void bnx2x_ets_bw_limit_common(const struct link_params *params) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); - /** - * Bitmap of 5bits length. Each bit specifies whether the entry behaves - * as strict. 
Bits 0,1,2 - debug and management entries, 3 - COS0 - * entry, 4 - COS1 entry. - * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT - * bit4 bit3 bit2 bit1 bit0 - * MCP and debug are strict - */ + /* + * Bitmap of 5bits length. Each bit specifies whether the entry behaves + * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 + * entry, 4 - COS1 entry. + * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT + * bit4 bit3 bit2 bit1 bit0 + * MCP and debug are strict + */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/ @@ -329,8 +324,7 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, if ((0 == total_bw) || (0 == cos0_bw) || (0 == cos1_bw)) { - DP(NETIF_MSG_LINK, - "bnx2x_ets_bw_limit: Total BW can't be zero\n"); + DP(NETIF_MSG_LINK, "Total BW can't be zero\n"); return; } @@ -355,7 +349,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) u32 val = 0; DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); - /** + /* * Bitmap of 5bits length. Each bit specifies whether the entry behaves * as strict. Bits 0,1,2 - debug and management entries, * 3 - COS0 entry, 4 - COS1 entry. @@ -364,7 +358,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) * MCP and debug are strict */ REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); - /** + /* * For strict priority entries defines the number of consecutive slots * for the highest priority. */ @@ -377,14 +371,14 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) /* Defines the number of consecutive slots for the strict priority */ REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); - /** - * mapping between entry priority to client number (0,1,2 -debug and - * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) - * 3bits client num. - * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 - * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 - * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 - */ + /* + * mapping between entry priority to client number (0,1,2 -debug and + * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) + * 3bits client num. + * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 + * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000 + * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000 + */ val = (0 == strict_cos) ? 
0x2318 : 0x22E0; REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val); @@ -471,7 +465,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars, /* MAC/PBF section */ /******************************************************************/ static void bnx2x_emac_init(struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { /* reset and unreset the emac core */ struct bnx2x *bp = params->bp; @@ -481,10 +475,10 @@ static void bnx2x_emac_init(struct link_params *params, u16 timeout; REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); udelay(5); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); + (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port)); /* init emac - use read-modify-write */ /* self clear reset */ @@ -515,7 +509,7 @@ static void bnx2x_emac_init(struct link_params *params, } static u8 bnx2x_emac_enable(struct link_params *params, - struct link_vars *vars, u8 lb) + struct link_vars *vars, u8 lb) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -527,55 +521,33 @@ static u8 bnx2x_emac_enable(struct link_params *params, /* enable emac and not bmac */ REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1); - /* for paladium */ - if (CHIP_REV_IS_EMUL(bp)) { - /* Use lane 1 (of lanes 0-3) */ - REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1); - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + - port*4, 1); - } - /* for fpga */ - else - - if (CHIP_REV_IS_FPGA(bp)) { - /* Use lane 1 (of lanes 0-3) */ - DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n"); - - REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1); - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, - 0); - } else /* ASIC */ if (vars->phy_flags & PHY_XGXS_FLAG) { u32 ser_lane = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); DP(NETIF_MSG_LINK, "XGXS\n"); /* select the master lanes (out of 0-3) */ - REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + - port*4, ser_lane); + REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane); /* select XGXS */ - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + - port*4, 1); + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1); } else { /* SerDes */ DP(NETIF_MSG_LINK, "SerDes\n"); /* select SerDes */ - REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + - port*4, 0); + REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0); } bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE, - EMAC_RX_MODE_RESET); + EMAC_RX_MODE_RESET); bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, - EMAC_TX_MODE_RESET); + EMAC_TX_MODE_RESET); if (CHIP_REV_IS_SLOW(bp)) { /* config GMII mode */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); - EMAC_WR(bp, EMAC_REG_EMAC_MODE, - (val | EMAC_MODE_PORT_GMII)); + EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII)); } else { /* ASIC */ /* pause enable/disable */ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, @@ -605,14 +577,14 @@ static u8 bnx2x_emac_enable(struct link_params *params, val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; - /** - * Setting this bit causes MAC control frames (except for pause - * frames) to be passed on for processing. This setting has no - * affect on the operation of the pause frames. 
This bit effects - * all packets regardless of RX Parser packet sorting logic. - * Turn the PFC off to make sure we are in Xon state before - * enabling it. - */ + /* + * Setting this bit causes MAC control frames (except for pause + * frames) to be passed on for processing. This setting has no + * affect on the operation of the pause frames. This bit effects + * all packets regardless of RX Parser packet sorting logic. + * Turn the PFC off to make sure we are in Xon state before + * enabling it. + */ EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0); if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) { DP(NETIF_MSG_LINK, "PFC is enabled\n"); @@ -666,16 +638,7 @@ static u8 bnx2x_emac_enable(struct link_params *params, REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1); - if (CHIP_REV_IS_EMUL(bp)) { - /* take the BigMac out of reset */ - REG_WR(bp, - GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - - /* enable access for bmac registers */ - REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); - } else - REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); + REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0); vars->mac_type = MAC_TYPE_EMAC; return 0; @@ -731,8 +694,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, val |= (1<<5); wb_data[0] = val; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2); udelay(30); /* Tx control */ @@ -768,12 +730,12 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); - /** - * Set Time (based unit is 512 bit time) between automatic - * re-sending of PP packets amd enable automatic re-send of - * Per-Priroity Packet as long as pp_gen is asserted and - * pp_disable is low. - */ + /* + * Set Time (based unit is 512 bit time) between automatic + * re-sending of PP packets amd enable automatic re-send of + * Per-Priroity Packet as long as pp_gen is asserted and + * pp_disable is low. + */ val = 0x8000; if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) val |= (1<<16); /* enable automatic re-send */ @@ -781,7 +743,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, wb_data[0] = val; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, - wb_data, 2); + wb_data, 2); /* mac control */ val = 0x3; /* Enable RX and TX */ @@ -795,8 +757,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, wb_data[0] = val; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); } static void bnx2x_update_pfc_brb(struct link_params *params, @@ -825,17 +786,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params, full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; } - /* The number of free blocks below which the pause signal to class 0 - of MAC #n is asserted. n=0,1 */ + /* + * The number of free blocks below which the pause signal to class 0 + * of MAC #n is asserted. n=0,1 + */ REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th); - /* The number of free blocks above which the pause signal to class 0 - of MAC #n is de-asserted. n=0,1 */ + /* + * The number of free blocks above which the pause signal to class 0 + * of MAC #n is de-asserted. 
n=0,1 + */ REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th); - /* The number of free blocks below which the full signal to class 0 - of MAC #n is asserted. n=0,1 */ + /* + * The number of free blocks below which the full signal to class 0 + * of MAC #n is asserted. n=0,1 + */ REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th); - /* The number of free blocks above which the full signal to class 0 - of MAC #n is de-asserted. n=0,1 */ + /* + * The number of free blocks above which the full signal to class 0 + * of MAC #n is de-asserted. n=0,1 + */ REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th); if (set_pfc && pfc_params) { @@ -859,25 +828,25 @@ static void bnx2x_update_pfc_brb(struct link_params *params, full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE; } - /** + /* * The number of free blocks below which the pause signal to * class 1 of MAC #n is asserted. n=0,1 - **/ + */ REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th); - /** + /* * The number of free blocks above which the pause signal to * class 1 of MAC #n is de-asserted. n=0,1 - **/ + */ REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th); - /** + /* * The number of free blocks below which the full signal to * class 1 of MAC #n is asserted. n=0,1 - **/ + */ REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th); - /** + /* * The number of free blocks above which the full signal to * class 1 of MAC #n is de-asserted. n=0,1 - **/ + */ REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th); } } @@ -896,7 +865,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, FEATURE_CONFIG_PFC_ENABLED; DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); - /** + /* * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set * MAC control frames (that are not pause packets) * will be forwarded to the XCM. @@ -904,7 +873,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : NIG_REG_LLH0_XCM_MASK); - /** + /* * nig params will override non PFC params, since it's possible to * do transition from PFC to SAFC */ @@ -994,7 +963,7 @@ void bnx2x_update_pfc(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *pfc_params) { - /** + /* * The PFC and pause are orthogonal to one another, meaning when * PFC is enabled, the pause are disabled, and when PFC is * disabled, pause are set according to the pause result. 
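
The comment above states the rule bnx2x_update_pfc() enforces: per-priority flow control and ordinary 802.3x pause are mutually exclusive, so whichever is enabled decides how the MAC is programmed. A minimal sketch of that selection logic follows; enable_pfc() and enable_pause() are hypothetical stand-ins for the BMAC/EMAC/NIG/BRB programming done by the driver, not real bnx2x functions.

/* Sketch of the PFC-vs-pause rule described in the comment above.
 * enable_pfc()/enable_pause() stand in for the MAC, NIG and BRB
 * programming performed by bnx2x_update_pfc() and its helpers.
 */
#include <stdbool.h>
#include <stdio.h>

enum flow_ctrl { FLOW_CTRL_NONE, FLOW_CTRL_RX, FLOW_CTRL_TX, FLOW_CTRL_BOTH };

static void enable_pfc(void)
{
	printf("per-priority flow control on, 802.3x pause off\n");
}

static void enable_pause(enum flow_ctrl fc)
{
	printf("802.3x pause mode %d\n", (int)fc);
}

static void update_flow_control(bool pfc_enabled, enum flow_ctrl resolved)
{
	if (pfc_enabled)
		enable_pfc();		/* PFC on: pause stays disabled */
	else
		enable_pause(resolved);	/* PFC off: use the pause resolution */
}

int main(void)
{
	update_flow_control(true, FLOW_CTRL_BOTH);
	update_flow_control(false, FLOW_CTRL_RX);
	return 0;
}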
@@ -1035,7 +1004,7 @@ void bnx2x_update_pfc(struct link_params *params, static u8 bnx2x_bmac1_enable(struct link_params *params, struct link_vars *vars, - u8 is_lb) + u8 is_lb) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -1049,9 +1018,8 @@ static u8 bnx2x_bmac1_enable(struct link_params *params, /* XGXS control */ wb_data[0] = 0x3c; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_XGXS_CONTROL, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); /* tx MAC SA */ wb_data[0] = ((params->mac_addr[2] << 24) | @@ -1060,8 +1028,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params, params->mac_addr[5]); wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]); - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); /* mac control */ val = 0x3; @@ -1071,43 +1038,30 @@ static u8 bnx2x_bmac1_enable(struct link_params *params, } wb_data[0] = val; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); /* set rx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); bnx2x_update_pfc_bmac1(params, vars); /* set tx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); /* set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); /* configure safc */ wb_data[0] = 0x1000200; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, wb_data, 2); - /* fix for emulation */ - if (CHIP_REV_IS_EMUL(bp)) { - wb_data[0] = 0xf000; - wb_data[1] = 0; - REG_WR_DMAE(bp, - bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD, - wb_data, 2); - } - return 0; } @@ -1126,16 +1080,14 @@ static u8 bnx2x_bmac2_enable(struct link_params *params, wb_data[0] = 0; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2); udelay(30); /* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */ wb_data[0] = 0x3c; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL, + wb_data, 2); udelay(30); @@ -1147,7 +1099,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params, wb_data[1] = ((params->mac_addr[0] << 8) | params->mac_addr[1]); REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR, - wb_data, 2); + wb_data, 2); udelay(30); @@ -1155,27 +1107,24 @@ static u8 bnx2x_bmac2_enable(struct link_params *params, wb_data[0] = 0x1000200; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS, - wb_data, 2); + wb_data, 2); udelay(30); /* set rx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); 
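
The rx/tx maximum-frame registers above are all written with the same value: ETH_MAX_JUMBO_PACKET_SIZE plus the ETH_OVREHEAD defined at the top of bnx2x_link.c in this patch (the BMAC2 counter max size, a few lines further down, is that value minus 2). A short worked example of the arithmetic:

/* Worked example of the BigMAC max-frame value programmed above, using
 * the constants defined earlier in this patch (bnx2x_link.c).
 */
#include <stdio.h>

#define ETH_HLEN			14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD			(ETH_HLEN + 8 + 8)
#define ETH_MAX_JUMBO_PACKET_SIZE	9600

int main(void)
{
	/* rx/tx max size: 9600 + 14 + 8 + 8 = 9630 bytes */
	printf("max frame: %d\n", ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD);
	/* BMAC2 cnt max size is two bytes less: 9628 */
	printf("cnt max:   %d\n",
	       ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2);
	return 0;
}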
udelay(30); /* set tx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); udelay(30); /* set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; wb_data[1] = 0; - REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, - wb_data, 2); + REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); udelay(30); bnx2x_update_pfc_bmac2(params, vars, is_lb); @@ -1191,11 +1140,11 @@ static u8 bnx2x_bmac_enable(struct link_params *params, u32 val; /* reset and unreset the BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); msleep(1); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); /* enable access for bmac registers */ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); @@ -1230,15 +1179,14 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status) struct bnx2x *bp = params->bp; REG_WR(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[params->port].link_status), - link_status); + offsetof(struct shmem_region, + port_mb[params->port].link_status), link_status); } static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) { u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : - NIG_REG_INGRESS_BMAC0_MEM; + NIG_REG_INGRESS_BMAC0_MEM; u32 wb_data[2]; u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); @@ -1250,12 +1198,12 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) if (CHIP_IS_E2(bp)) { /* Clear Rx Enable bit in BMAC_CONTROL register */ REG_RD_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); + BIGMAC2_REGISTER_BMAC_CONTROL, + wb_data, 2); wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; REG_WR_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); + BIGMAC2_REGISTER_BMAC_CONTROL, + wb_data, 2); } else { /* Clear Rx Enable bit in BMAC_CONTROL register */ REG_RD_DMAE(bp, bmac_addr + @@ -1271,7 +1219,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) } static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, - u32 line_speed) + u32 line_speed) { struct bnx2x *bp = params->bp; u8 port = params->port; @@ -1308,7 +1256,7 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, /* update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); /* update init credit */ - init_crd = 778; /* (800-18-4) */ + init_crd = 778; /* (800-18-4) */ } else { u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + @@ -1353,6 +1301,23 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, return 0; } +/* + * get_emac_base + * + * @param cb + * @param mdc_mdio_access + * @param port + * + * @return u32 + * + * This function selects the MDC/MDIO access (through emac0 or + * emac1) depend on the mdc_mdio_access, port, port swapped. Each + * phy has a default access mode, which could also be overridden + * by nvram configuration. 
This parameter, whether this is the + * default phy configuration, or the nvram overrun + * configuration, is passed here as mdc_mdio_access and selects + * the emac_base for the CL45 read/writes operations + */ static u32 bnx2x_get_emac_base(struct bnx2x *bp, u32 mdc_mdio_access, u8 port) { @@ -1385,13 +1350,16 @@ static u32 bnx2x_get_emac_base(struct bnx2x *bp, } -u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 val) +/******************************************************************/ +/* CL45 access functions */ +/******************************************************************/ +static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 val) { u32 tmp, saved_mode; u8 i, rc = 0; - - /* set clause 45 mode, slow down the MDIO clock to 2.5MHz + /* + * Set clause 45 mode, slow down the MDIO clock to 2.5MHz * (a value of 49==0x31) and make sure that the AUTO poll is off */ @@ -1414,8 +1382,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, for (i = 0; i < 50; i++) { udelay(10); - tmp = REG_RD(bp, phy->mdio_ctrl + - EMAC_REG_EMAC_MDIO_COMM); + tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM); if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { udelay(5); break; @@ -1423,6 +1390,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, } if (tmp & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "write phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; } else { /* data */ @@ -1435,7 +1403,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, udelay(10); tmp = REG_RD(bp, phy->mdio_ctrl + - EMAC_REG_EMAC_MDIO_COMM); + EMAC_REG_EMAC_MDIO_COMM); if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) { udelay(5); break; @@ -1443,6 +1411,7 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, } if (tmp & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "write phy register failed\n"); + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = -EFAULT; } } @@ -1453,20 +1422,20 @@ u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, return rc; } -u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, - u8 devad, u16 reg, u16 *ret_val) +static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, + u8 devad, u16 reg, u16 *ret_val) { u32 val, saved_mode; u16 i; u8 rc = 0; - - /* set clause 45 mode, slow down the MDIO clock to 2.5MHz + /* + * Set clause 45 mode, slow down the MDIO clock to 2.5MHz * (a value of 49==0x31) and make sure that the AUTO poll is off */ saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE); val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL | - EMAC_MDIO_MODE_CLOCK_CNT)); + EMAC_MDIO_MODE_CLOCK_CNT)); val |= (EMAC_MDIO_MODE_CLAUSE_45 | (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT)); REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val); @@ -1490,7 +1459,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, } if (val & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "read phy register failed\n"); - + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); *ret_val = 0; rc = -EFAULT; @@ -1505,7 +1474,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, udelay(10); val = REG_RD(bp, phy->mdio_ctrl + - EMAC_REG_EMAC_MDIO_COMM); + EMAC_REG_EMAC_MDIO_COMM); if (!(val & EMAC_MDIO_COMM_START_BUSY)) { *ret_val = (u16)(val & EMAC_MDIO_COMM_DATA); break; @@ -1513,7 +1482,7 @@ u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, } if (val & EMAC_MDIO_COMM_START_BUSY) { DP(NETIF_MSG_LINK, "read phy 
register failed\n"); - + netdev_err(bp->dev, "MDC/MDIO access timeout\n"); *ret_val = 0; rc = -EFAULT; } @@ -1529,7 +1498,7 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr, u8 devad, u16 reg, u16 *ret_val) { u8 phy_index; - /** + /* * Probe for the phy according to the given phy_addr, and execute * the read request on it */ @@ -1547,7 +1516,7 @@ u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr, u8 devad, u16 reg, u16 val) { u8 phy_index; - /** + /* * Probe for the phy according to the given phy_addr, and execute * the write request on it */ @@ -1576,16 +1545,15 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params, aer_val = 0x3800 + offset - 1; else aer_val = 0x3800 + offset; - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_AER_BLOCK, - MDIO_AER_BLOCK_AER_REG, aer_val); + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, aer_val); } static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp, struct bnx2x_phy *phy) { - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_AER_BLOCK, - MDIO_AER_BLOCK_AER_REG, 0x3800); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_AER_BLOCK, + MDIO_AER_BLOCK_AER_REG, 0x3800); } /******************************************************************/ @@ -1621,9 +1589,8 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) bnx2x_set_serdes_access(bp, port); - REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + - port*0x10, - DEFAULT_PHY_DEV_ADDR); + REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10, + DEFAULT_PHY_DEV_ADDR); } static void bnx2x_xgxs_deassert(struct link_params *params) @@ -1641,23 +1608,22 @@ static void bnx2x_xgxs_deassert(struct link_params *params) udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + - port*0x18, 0); + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0); REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - params->phy[INT_PHY].def_md_devad); + params->phy[INT_PHY].def_md_devad); } void bnx2x_link_status_update(struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 link_10g; u8 port = params->port; vars->link_status = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, - port_mb[port].link_status)); + offsetof(struct shmem_region, + port_mb[port].link_status)); vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP); @@ -1667,7 +1633,7 @@ void bnx2x_link_status_update(struct link_params *params, vars->phy_link_up = 1; vars->duplex = DUPLEX_FULL; switch (vars->link_status & - LINK_STATUS_SPEED_AND_DUPLEX_MASK) { + LINK_STATUS_SPEED_AND_DUPLEX_MASK) { case LINK_10THD: vars->duplex = DUPLEX_HALF; /* fall thru */ @@ -1779,20 +1745,20 @@ static void bnx2x_set_master_ln(struct link_params *params, { struct bnx2x *bp = params->bp; u16 new_master_ln, ser_lane; - ser_lane = ((params->lane_config & + ser_lane = ((params->lane_config & PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); /* set the master_ln for AN */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TEST_MODE_LANE, - &new_master_ln); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + &new_master_ln); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2 , - MDIO_XGXS_BLOCK2_TEST_MODE_LANE, - (new_master_ln | ser_lane)); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2 , + MDIO_XGXS_BLOCK2_TEST_MODE_LANE, + (new_master_ln | ser_lane)); } 
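
bnx2x_set_master_ln() above derives the SerDes master lane from a bit-field packed into params->lane_config and then read-modify-writes it into MDIO_XGXS_BLOCK2_TEST_MODE_LANE through the CL22_RD/WR_OVER_CL45 shortcuts. The standalone sketch below shows only the (cfg & MASK) >> SHIFT extraction pattern; the mask and shift values are illustrative placeholders, not the real PORT_HW_CFG_LANE_SWAP_CFG_MASTER_* definitions from bnx2x_hsi.h.

/* Sketch of the field extraction used by bnx2x_set_master_ln() above.
 * EXAMPLE_* values are placeholders, not the real PORT_HW_CFG_* macros.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_LANE_SWAP_CFG_MASTER_MASK	0x00030000u	/* assumed */
#define EXAMPLE_LANE_SWAP_CFG_MASTER_SHIFT	16		/* assumed */

static uint16_t get_master_lane(uint32_t lane_config)
{
	return (uint16_t)((lane_config & EXAMPLE_LANE_SWAP_CFG_MASTER_MASK) >>
			  EXAMPLE_LANE_SWAP_CFG_MASTER_SHIFT);
}

int main(void)
{
	/* lane 2 selected as master in this made-up configuration word */
	printf("master lane %u\n", (unsigned)get_master_lane(0x00020000u));
	return 0;
}

/* The driver then ORs the extracted lane into the value read back from
 * MDIO_XGXS_BLOCK2_TEST_MODE_LANE, i.e. a read-modify-write of that
 * register via the clause-45 shortcuts. */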
static u8 bnx2x_reset_unicore(struct link_params *params, @@ -1802,17 +1768,16 @@ static u8 bnx2x_reset_unicore(struct link_params *params, struct bnx2x *bp = params->bp; u16 mii_control; u16 i; - - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); /* reset the unicore */ - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - (mii_control | - MDIO_COMBO_IEEO_MII_CONTROL_RESET)); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_RESET)); if (set_serdes) bnx2x_set_serdes_access(bp, params->port); @@ -1821,10 +1786,10 @@ static u8 bnx2x_reset_unicore(struct link_params *params, udelay(5); /* the reset erased the previous bank value */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) { udelay(5); @@ -1832,6 +1797,9 @@ static u8 bnx2x_reset_unicore(struct link_params *params, } } + netdev_err(bp->dev, "Warning: PHY was not initialized," + " Port %d\n", + params->port); DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n"); return -EINVAL; @@ -1841,43 +1809,45 @@ static void bnx2x_set_swap_lanes(struct link_params *params, struct bnx2x_phy *phy) { struct bnx2x *bp = params->bp; - /* Each two bits represents a lane number: - No swap is 0123 => 0x1b no need to enable the swap */ + /* + * Each two bits represents a lane number: + * No swap is 0123 => 0x1b no need to enable the swap + */ u16 ser_lane, rx_lane_swap, tx_lane_swap; ser_lane = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); rx_lane_swap = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT); tx_lane_swap = ((params->lane_config & - PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> - PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); + PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >> + PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT); if (rx_lane_swap != 0x1b) { - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_RX_LN_SWAP, - (rx_lane_swap | - MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | - MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, + (rx_lane_swap | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE | + MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE)); } else { - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0); } if (tx_lane_swap != 0x1b) { - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TX_LN_SWAP, - (tx_lane_swap | - MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_TX_LN_SWAP, + (tx_lane_swap | + MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE)); } else { - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + 
MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0); } } @@ -1886,66 +1856,66 @@ static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 control2; - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, - &control2); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + &control2); if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; else control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n", phy->speed_cap_mask, control2); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, - control2); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, + control2); if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { DP(NETIF_MSG_LINK, "XGXS\n"); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT); - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, - &control2); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + &control2); control2 |= MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN; - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, - control2); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL, + control2); /* Disable parallel detection of HiG */ - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_XGXS_BLOCK2, - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | - MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_XGXS_BLOCK2, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G, + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS | + MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS); } } static void bnx2x_set_autoneg(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars, - u8 enable_cl73) + struct link_vars *vars, + u8 enable_cl73) { struct bnx2x *bp = params->bp; u16 reg_val; /* CL37 Autoneg */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); /* CL37 Autoneg Enabled */ if (vars->line_speed == SPEED_AUTO_NEG) @@ -1954,15 +1924,15 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); /* Enable/Disable Autodetection */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, ®_val); reg_val &= 
~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT); reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE; @@ -1971,14 +1941,14 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, else reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET; - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val); /* Enable TetonII and BAM autoneg */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_BAM_NEXT_PAGE, - MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, ®_val); if (vars->line_speed == SPEED_AUTO_NEG) { /* Enable BAM aneg Mode and TetonII aneg Mode */ @@ -1989,20 +1959,20 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE | MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN); } - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_BAM_NEXT_PAGE, - MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, - reg_val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_BAM_NEXT_PAGE, + MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL, + reg_val); if (enable_cl73) { /* Enable Cl73 FSM status bits */ - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_USERB0, - MDIO_CL73_USERB0_CL73_UCTRL, - 0xe); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_UCTRL, + 0xe); /* Enable BAM Station Manager*/ - CL45_WR_OVER_CL22(bp, phy, + CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_USERB0, MDIO_CL73_USERB0_CL73_BAM_CTRL1, MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN | @@ -2010,10 +1980,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); /* Advertise CL73 link speeds */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV2, - ®_val); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + ®_val); if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4; @@ -2021,10 +1991,10 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX; - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV2, - reg_val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV2, + reg_val); /* CL73 Autoneg Enabled */ reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; @@ -2032,37 +2002,39 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, } else /* CL73 Autoneg Disabled */ reg_val = 0; - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); } /* program SerDes, forced speed */ static void bnx2x_program_serdes(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { struct bnx2x *bp = params->bp; u16 reg_val; /* program duplex, disable autoneg and sgmii*/ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, ®_val); reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX | MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | 
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK); if (phy->req_duplex == DUPLEX_FULL) reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); - - /* program speed - - needed only if the speed is greater than 1G (2.5G or 10G) */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_MISC1, ®_val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); + + /* + * program speed + * - needed only if the speed is greater than 1G (2.5G or 10G) + */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, ®_val); /* clearing the speed value before setting the right speed */ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); @@ -2083,9 +2055,9 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G; } - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_MISC1, reg_val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_MISC1, reg_val); } @@ -2102,13 +2074,13 @@ static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x_phy *phy, val |= MDIO_OVER_1G_UP1_2_5G; if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) val |= MDIO_OVER_1G_UP1_10G; - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_UP1, val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP1, val); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_UP3, 0x400); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_UP3, 0x400); } static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, @@ -2116,22 +2088,21 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; - /* resolve pause mode and advertisement - * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ + /* + * Resolve pause mode and advertisement. 
+ * Please refer to Table 28B-3 of the 802.3ab-1999 spec + */ switch (phy->req_flow_ctrl) { case BNX2X_FLOW_CTRL_AUTO: - if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { - *ieee_fc |= - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; - } else { + if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; + else *ieee_fc |= - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; - } + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; break; case BNX2X_FLOW_CTRL_TX: - *ieee_fc |= - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; + *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; break; case BNX2X_FLOW_CTRL_RX: @@ -2149,23 +2120,23 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy, struct link_params *params, - u16 ieee_fc) + u16 ieee_fc) { struct bnx2x *bp = params->bp; u16 val; /* for AN, we are always publishing full duplex */ - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, &val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, &val); val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH; val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, val); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, val); } static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, @@ -2179,67 +2150,67 @@ static void bnx2x_restart_autoneg(struct bnx2x_phy *phy, /* Enable and restart BAM/CL37 aneg */ if (enable_cl73) { - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - &mii_control); - - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - (mii_control | - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + &mii_control); + + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + (mii_control | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN | + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN)); } else { - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg mii_control before = 0x%x\n", mii_control); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - (mii_control | - MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | - MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + (mii_control | + MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | + MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN)); } } static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, struct link_params *params, - struct link_vars *vars) + struct link_vars *vars) { struct bnx2x *bp = params->bp; u16 control1; /* in SGMII mode, the unicore is always slave */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, - &control1); + CL22_RD_OVER_CL45(bp, 
phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + &control1); control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; /* set sgmii mode (and not fiber) */ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, - control1); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, + control1); /* if forced speed */ if (!(vars->line_speed == SPEED_AUTO_NEG)) { /* set speed, disable autoneg */ u16 mii_control; - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - &mii_control); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + &mii_control); mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN | MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK| MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX); @@ -2267,10 +2238,10 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, if (phy->req_duplex == DUPLEX_FULL) mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_MII_CONTROL, - mii_control); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_MII_CONTROL, + mii_control); } else { /* AN mode */ /* enable and restart AN */ @@ -2285,19 +2256,19 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) { /* LD LP */ - switch (pause_result) { /* ASYM P ASYM P */ - case 0xb: /* 1 0 1 1 */ + switch (pause_result) { /* ASYM P ASYM P */ + case 0xb: /* 1 0 1 1 */ vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; break; - case 0xe: /* 1 1 1 0 */ + case 0xe: /* 1 1 1 0 */ vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; break; - case 0x5: /* 0 1 0 1 */ - case 0x7: /* 0 1 1 1 */ - case 0xd: /* 1 1 0 1 */ - case 0xf: /* 1 1 1 1 */ + case 0x5: /* 0 1 0 1 */ + case 0x7: /* 0 1 1 1 */ + case 0xd: /* 1 1 0 1 */ + case 0xf: /* 1 1 1 1 */ vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; break; @@ -2317,24 +2288,24 @@ static u8 bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, u16 pd_10g, status2_1000x; if (phy->req_line_speed != SPEED_AUTO_NEG) return 0; - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_STATUS2, - &status2_1000x); - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_SERDES_DIGITAL, - MDIO_SERDES_DIGITAL_A_1000X_STATUS2, - &status2_1000x); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_SERDES_DIGITAL, + MDIO_SERDES_DIGITAL_A_1000X_STATUS2, + &status2_1000x); if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n", params->port); return 1; } - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_10G_PARALLEL_DETECT, - MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, - &pd_10g); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_10G_PARALLEL_DETECT, + MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, + &pd_10g); if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n", @@ -2373,14 +2344,14 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | 
MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) { - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_ADV1, - &ld_pause); - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB1, - MDIO_CL73_IEEEB1_AN_LP_ADV1, - &lp_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_ADV1, + &ld_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB1, + MDIO_CL73_IEEEB1_AN_LP_ADV1, + &lp_pause); pause_result = (ld_pause & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8; @@ -2390,18 +2361,18 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result); } else { - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_ADV, - &ld_pause); - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_COMBO_IEEE0, - MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, - &lp_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_ADV, + &ld_pause); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_COMBO_IEEE0, + MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, + &lp_pause); pause_result = (ld_pause & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; pause_result |= (lp_pause & - MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; + MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result); } @@ -2417,25 +2388,25 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, u16 rx_status, ustat_val, cl37_fsm_recieved; DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n"); /* Step 1: Make sure signal is detected */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_RX0, - MDIO_RX0_RX_STATUS, - &rx_status); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_RX0, + MDIO_RX0_RX_STATUS, + &rx_status); if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) != (MDIO_RX0_RX_STATUS_SIGDET)) { DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73." 
"rx_status(0x80b0) = 0x%x\n", rx_status); - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN); return; } /* Step 2: Check CL73 state machine */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_USERB0, - MDIO_CL73_USERB0_CL73_USTAT1, - &ustat_val); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_USERB0, + MDIO_CL73_USERB0_CL73_USTAT1, + &ustat_val); if ((ustat_val & (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK | MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) != @@ -2445,12 +2416,14 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, "ustat_val(0x8371) = 0x%x\n", ustat_val); return; } - /* Step 3: Check CL37 Message Pages received to indicate LP - supports only CL37 */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_REMOTE_PHY, - MDIO_REMOTE_PHY_MISC_RX_STATUS, - &cl37_fsm_recieved); + /* + * Step 3: Check CL37 Message Pages received to indicate LP + * supports only CL37 + */ + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_REMOTE_PHY, + MDIO_REMOTE_PHY_MISC_RX_STATUS, + &cl37_fsm_recieved); if ((cl37_fsm_recieved & (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG | MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) != @@ -2461,14 +2434,18 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy, cl37_fsm_recieved); return; } - /* The combined cl37/cl73 fsm state information indicating that we are - connected to a device which does not support cl73, but does support - cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */ + /* + * The combined cl37/cl73 fsm state information indicating that + * we are connected to a device which does not support cl73, but + * does support cl37 BAM. 
In this case we disable cl73 and + * restart cl37 auto-neg + */ + /* Disable CL73 */ - CL45_WR_OVER_CL22(bp, phy, - MDIO_REG_BANK_CL73_IEEEB0, - MDIO_CL73_IEEEB0_CL73_AN_CONTROL, - 0); + CL22_WR_OVER_CL45(bp, phy, + MDIO_REG_BANK_CL73_IEEEB0, + MDIO_CL73_IEEEB0_CL73_AN_CONTROL, + 0); /* Restart CL37 autoneg */ bnx2x_restart_autoneg(phy, params, 0); DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n"); @@ -2493,14 +2470,14 @@ static u8 bnx2x_link_settings_status(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 new_line_speed , gp_status; + u16 new_line_speed, gp_status; u8 rc = 0; /* Read gp_status */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_GP_STATUS, - MDIO_GP_STATUS_TOP_AN_STATUS1, - &gp_status); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); if (phy->req_line_speed == SPEED_AUTO_NEG) vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED; @@ -2637,9 +2614,9 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) u16 bank; /* read precomp */ - CL45_RD_OVER_CL22(bp, phy, - MDIO_REG_BANK_OVER_1G, - MDIO_OVER_1G_LP_UP2, &lp_up2); + CL22_RD_OVER_CL45(bp, phy, + MDIO_REG_BANK_OVER_1G, + MDIO_OVER_1G_LP_UP2, &lp_up2); /* bits [10:7] at lp_up2, positioned at [15:12] */ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> @@ -2651,18 +2628,18 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3; bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) { - CL45_RD_OVER_CL22(bp, phy, - bank, - MDIO_TX0_TX_DRIVER, &tx_driver); + CL22_RD_OVER_CL45(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, &tx_driver); /* replace tx_driver bits [15:12] */ if (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; tx_driver |= lp_up2; - CL45_WR_OVER_CL22(bp, phy, - bank, - MDIO_TX0_TX_DRIVER, tx_driver); + CL22_WR_OVER_CL45(bp, phy, + bank, + MDIO_TX0_TX_DRIVER, tx_driver); } } } @@ -2676,10 +2653,10 @@ static u8 bnx2x_emac_program(struct link_params *params, DP(NETIF_MSG_LINK, "setting link speed & duplex\n"); bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + - EMAC_REG_EMAC_MODE, - (EMAC_MODE_25G_MODE | - EMAC_MODE_PORT_MII_10M | - EMAC_MODE_HALF_DUPLEX)); + EMAC_REG_EMAC_MODE, + (EMAC_MODE_25G_MODE | + EMAC_MODE_PORT_MII_10M | + EMAC_MODE_HALF_DUPLEX)); switch (vars->line_speed) { case SPEED_10: mode |= EMAC_MODE_PORT_MII_10M; @@ -2707,8 +2684,8 @@ static u8 bnx2x_emac_program(struct link_params *params, if (vars->duplex == DUPLEX_HALF) mode |= EMAC_MODE_HALF_DUPLEX; bnx2x_bits_en(bp, - GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, - mode); + GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, + mode); bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); return 0; @@ -2723,7 +2700,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3; bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) { - CL45_WR_OVER_CL22(bp, phy, + CL22_WR_OVER_CL45(bp, phy, bank, MDIO_RX0_RX_EQ_BOOST, phy->rx_preemphasis[i]); @@ -2731,7 +2708,7 @@ static void bnx2x_set_preemphasis(struct bnx2x_phy *phy, for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3; bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) { - CL45_WR_OVER_CL22(bp, phy, + CL22_WR_OVER_CL45(bp, phy, bank, MDIO_TX0_TX_DRIVER, phy->tx_preemphasis[i]); @@ -2754,7 +2731,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, /* 
forced speed requested? */ if (vars->line_speed != SPEED_AUTO_NEG || (SINGLE_MEDIA_DIRECT(params) && - params->loopback_mode == LOOPBACK_EXT)) { + params->loopback_mode == LOOPBACK_EXT)) { DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); /* disable autoneg */ @@ -2771,7 +2748,7 @@ static void bnx2x_init_internal_phy(struct bnx2x_phy *phy, /* program duplex & pause advertisement (for aneg) */ bnx2x_set_ieee_aneg_advertisment(phy, params, - vars->ieee_fc); + vars->ieee_fc); /* enable autoneg */ bnx2x_set_autoneg(phy, params, vars, enable_cl73); @@ -2842,7 +2819,8 @@ static u8 bnx2x_init_xgxs(struct bnx2x_phy *phy, } static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, - struct bnx2x_phy *phy) + struct bnx2x_phy *phy, + struct link_params *params) { u16 cnt, ctrl; /* Wait for soft reset to get cleared upto 1 sec */ @@ -2853,6 +2831,11 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, break; msleep(1); } + + if (cnt == 1000) + netdev_err(bp->dev, "Warning: PHY was not initialized," + " Port %d\n", + params->port); DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt); return cnt; } @@ -2863,9 +2846,7 @@ static void bnx2x_link_int_enable(struct link_params *params) u32 mask; struct bnx2x *bp = params->bp; - /* setting the status to report on link up - for either XGXS or SerDes */ - + /* Setting the status to report on link up for either XGXS or SerDes */ if (params->switch_cfg == SWITCH_CFG_10G) { mask = (NIG_MASK_XGXS0_LINK10G | NIG_MASK_XGXS0_LINK_STATUS); @@ -2908,7 +2889,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, { u32 latch_status = 0; - /** + /* * Disable the MI INT ( external phy int ) by writing 1 to the * status register. Link down indication is high-active-signal, * so in this case we need to write the status to clear the XOR @@ -2933,27 +2914,30 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port, /* For all latched-signal=up : Re-Arm Latch signals */ REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8, - (latch_status & 0xfffe) | (latch_status & 1)); + (latch_status & 0xfffe) | (latch_status & 1)); } /* For all latched-signal=up,Write original_signal to status */ } static void bnx2x_link_int_ack(struct link_params *params, - struct link_vars *vars, u8 is_10g) + struct link_vars *vars, u8 is_10g) { struct bnx2x *bp = params->bp; u8 port = params->port; - /* first reset all status - * we assume only one line will be change at a time */ + /* + * First reset all status we assume only one line will be + * change at a time + */ bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, - (NIG_STATUS_XGXS0_LINK10G | - NIG_STATUS_XGXS0_LINK_STATUS | - NIG_STATUS_SERDES0_LINK_STATUS)); + (NIG_STATUS_XGXS0_LINK10G | + NIG_STATUS_XGXS0_LINK_STATUS | + NIG_STATUS_SERDES0_LINK_STATUS)); if (vars->phy_link_up) { if (is_10g) { - /* Disable the 10G link interrupt - * by writing 1 to the status register + /* + * Disable the 10G link interrupt by writing 1 to the + * status register */ DP(NETIF_MSG_LINK, "10G XGXS phy link up\n"); bnx2x_bits_en(bp, @@ -2961,9 +2945,9 @@ static void bnx2x_link_int_ack(struct link_params *params, NIG_STATUS_XGXS0_LINK10G); } else if (params->switch_cfg == SWITCH_CFG_10G) { - /* Disable the link interrupt - * by writing 1 to the relevant lane - * in the status register + /* + * Disable the link interrupt by writing 1 to the + * relevant lane in the status register */ u32 ser_lane = ((params->lane_config & PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> @@ -2978,8 +2962,9 @@ static void bnx2x_link_int_ack(struct link_params 
*params, } else { /* SerDes */ DP(NETIF_MSG_LINK, "SerDes phy link up\n"); - /* Disable the link interrupt - * by writing 1 to the status register + /* + * Disable the link interrupt by writing 1 to the status + * register */ bnx2x_bits_en(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, @@ -3059,8 +3044,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded, } if ((params->num_phys == MAX_PHYS) && (params->phy[EXT_PHY2].ver_addr != 0)) { - spirom_ver = REG_RD(bp, - params->phy[EXT_PHY2].ver_addr); + spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr); if (params->phy[EXT_PHY2].format_fw_ver) { *ver_p = '/'; ver_p++; @@ -3089,29 +3073,27 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, /* change the uni_phy_addr in the nig */ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + - port*0x18)); + port*0x18)); REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5); bnx2x_cl45_write(bp, phy, - 5, - (MDIO_REG_BANK_AER_BLOCK + - (MDIO_AER_BLOCK_AER_REG & 0xf)), - 0x2800); + 5, + (MDIO_REG_BANK_AER_BLOCK + + (MDIO_AER_BLOCK_AER_REG & 0xf)), + 0x2800); bnx2x_cl45_write(bp, phy, - 5, - (MDIO_REG_BANK_CL73_IEEEB0 + - (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), - 0x6041); + 5, + (MDIO_REG_BANK_CL73_IEEEB0 + + (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), + 0x6041); msleep(200); /* set aer mmd back */ bnx2x_set_aer_mmd_xgxs(params, phy); /* and md_devad */ - REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, - md_devad); - + REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad); } else { u16 mii_ctrl; DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n"); @@ -3152,26 +3134,26 @@ u8 bnx2x_set_led(struct link_params *params, case LED_MODE_OFF: REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0); REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, - SHARED_HW_CFG_LED_MAC1); + SHARED_HW_CFG_LED_MAC1); tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE)); break; case LED_MODE_OPER: - /** + /* * For all other phys, OPER mode is same as ON, so in case * link is down, do nothing - **/ + */ if (!vars->link_up) break; case LED_MODE_ON: if (params->phy[EXT_PHY1].type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 && CHIP_IS_E2(bp) && params->num_phys == 2) { - /** - * This is a work-around for E2+8727 Configurations - */ + /* + * This is a work-around for E2+8727 Configurations + */ if (mode == LED_MODE_ON || speed == SPEED_10000){ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); @@ -3183,41 +3165,40 @@ u8 bnx2x_set_led(struct link_params *params, return rc; } } else if (SINGLE_MEDIA_DIRECT(params)) { - /** - * This is a work-around for HW issue found when link - * is up in CL73 - */ + /* + * This is a work-around for HW issue found when link + * is up in CL73 + */ REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1); } else { - REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, - hw_led_mode); + REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); } - REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + - port*4, 0); + REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0); /* Set blinking rate to ~15.9Hz */ REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4, - LED_BLINK_RATE_VAL); + LED_BLINK_RATE_VAL); REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + - port*4, 1); + port*4, 1); tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); - EMAC_WR(bp, EMAC_REG_EMAC_LED, - (tmp & (~EMAC_LED_OVERRIDE))); + EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE))); if (CHIP_IS_E1(bp) && ((speed == SPEED_2500) || (speed == SPEED_1000) || 
(speed == SPEED_100) || (speed == SPEED_10))) { - /* On Everest 1 Ax chip versions for speeds less than - 10G LED scheme is different */ + /* + * On Everest 1 Ax chip versions for speeds less than + * 10G LED scheme is different + */ REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 - + port*4, 1); + + port*4, 1); REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + - port*4, 0); + port*4, 0); REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + - port*4, 1); + port*4, 1); } break; @@ -3231,7 +3212,7 @@ u8 bnx2x_set_led(struct link_params *params, } -/** +/* * This function comes to reflect the actual link state read DIRECTLY from the * HW */ @@ -3243,10 +3224,10 @@ u8 bnx2x_test_link(struct link_params *params, struct link_vars *vars, u8 ext_phy_link_up = 0, serdes_phy_type; struct link_vars temp_vars; - CL45_RD_OVER_CL22(bp, ¶ms->phy[INT_PHY], - MDIO_REG_BANK_GP_STATUS, - MDIO_GP_STATUS_TOP_AN_STATUS1, - &gp_status); + CL22_RD_OVER_CL45(bp, ¶ms->phy[INT_PHY], + MDIO_REG_BANK_GP_STATUS, + MDIO_GP_STATUS_TOP_AN_STATUS1, + &gp_status); /* link is up only if both local phy and external phy are up */ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) return -ESRCH; @@ -3290,15 +3271,15 @@ static u8 bnx2x_link_initialize(struct link_params *params, u8 rc = 0; u8 phy_index, non_ext_phy; struct bnx2x *bp = params->bp; - /** - * In case of external phy existence, the line speed would be the - * line speed linked up by the external phy. In case it is direct - * only, then the line_speed during initialization will be - * equal to the req_line_speed - */ + /* + * In case of external phy existence, the line speed would be the + * line speed linked up by the external phy. In case it is direct + * only, then the line_speed during initialization will be + * equal to the req_line_speed + */ vars->line_speed = params->phy[INT_PHY].req_line_speed; - /** + /* * Initialize the internal phy in case this is a direct board * (no external phys), or this board has external phy which requires * to first. @@ -3326,17 +3307,16 @@ static u8 bnx2x_link_initialize(struct link_params *params, if (!non_ext_phy) for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { - /** + /* * No need to initialize second phy in case of first * phy only selection. In case of second phy, we do * need to initialize the first phy, since they are * connected. 
- **/ + */ if (phy_index == EXT_PHY2 && (bnx2x_phy_selection(params) == PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) { - DP(NETIF_MSG_LINK, "Not initializing" - "second phy\n"); + DP(NETIF_MSG_LINK, "Ignoring second phy\n"); continue; } params->phy[phy_index].config_init( @@ -3358,9 +3338,8 @@ static void bnx2x_int_link_reset(struct bnx2x_phy *phy, struct link_params *params) { /* reset the SerDes/XGXS */ - REG_WR(params->bp, GRCBASE_MISC + - MISC_REGISTERS_RESET_REG_3_CLEAR, - (0x1ff << (params->port*16))); + REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, + (0x1ff << (params->port*16))); } static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, @@ -3374,11 +3353,11 @@ static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy, else gpio_port = params->port; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); DP(NETIF_MSG_LINK, "reset external PHY\n"); } @@ -3409,9 +3388,8 @@ static u8 bnx2x_update_link_down(struct link_params *params, /* reset BigMac */ bnx2x_bmac_rx_disable(bp, params->port); - REG_WR(bp, GRCBASE_MISC + - MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); return 0; } @@ -3462,7 +3440,7 @@ static u8 bnx2x_update_link_up(struct link_params *params, msleep(20); return rc; } -/** +/* * The bnx2x_link_update function should be called upon link * interrupt. * Link is considered up as follows: @@ -3501,12 +3479,11 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4)); is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + - port*0x18) > 0); + port*0x18) > 0); DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n", REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), is_mi_int, - REG_RD(bp, - NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); + REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c)); DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n", REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), @@ -3515,14 +3492,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) /* disable emac */ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - /** - * Step 1: - * Check external link change only for external phys, and apply - * priority selection between them in case the link on both phys - * is up. Note that the instead of the common vars, a temporary - * vars argument is used since each phy may have different link/ - * speed/duplex result - */ + /* + * Step 1: + * Check external link change only for external phys, and apply + * priority selection between them in case the link on both phys + * is up. 
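When both external PHYs report link, the code that follows picks one of them as the active PHY according to the configured selection mode and takes the speed and flow-control results from it. A minimal standalone restatement of that choice, with hypothetical enum values in place of the PORT_HW_CFG_PHY_SELECTION_* constants and phy indices:

#include <stdio.h>

/* Hypothetical stand-ins for PORT_HW_CFG_PHY_SELECTION_* and phy indices. */
enum phy_sel {
        SEL_HARDWARE_DEFAULT,
        SEL_FIRST_PHY_PRIORITY,
        SEL_SECOND_PHY_PRIORITY,
        SEL_FIRST_PHY_ONLY,
        SEL_SECOND_PHY_ONLY,
};
enum phy_idx { INT_PHY, EXT_PHY1, EXT_PHY2, MAX_PHYS };

/*
 * Pick the external PHY whose speed/flow-control result drives the port.
 * The "only" modes never reach the dual-link case because the other PHY
 * is not initialized in the first place.
 */
static enum phy_idx pick_active_ext_phy(enum phy_sel sel,
                                        int phy1_up, int phy2_up)
{
        if (!phy1_up && !phy2_up)
                return INT_PHY;         /* no external link at all */
        if (phy1_up && !phy2_up)
                return EXT_PHY1;
        if (phy2_up && !phy1_up)
                return EXT_PHY2;

        switch (sel) {
        case SEL_HARDWARE_DEFAULT:
        case SEL_FIRST_PHY_PRIORITY:
                /* The first PHY passes traffic through itself only. */
                return EXT_PHY1;
        case SEL_SECOND_PHY_PRIORITY:
                /* The first PHY forwards traffic to the second. */
                return EXT_PHY2;
        default:
                /* Link on both PHYs is invalid for the remaining modes. */
                return MAX_PHYS;
        }
}

int main(void)
{
        printf("active external phy: %d\n",
               pick_active_ext_phy(SEL_SECOND_PHY_PRIORITY, 1, 1));
        return 0;
}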
Note that the instead of the common vars, a temporary + * vars argument is used since each phy may have different link/ + * speed/duplex result + */ for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { struct bnx2x_phy *phy = ¶ms->phy[phy_index]; @@ -3547,22 +3524,22 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) switch (bnx2x_phy_selection(params)) { case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: - /** + /* * In this option, the first PHY makes sure to pass the * traffic through itself only. * Its not clear how to reset the link on the second phy - **/ + */ active_external_phy = EXT_PHY1; break; case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: - /** + /* * In this option, the first PHY makes sure to pass the * traffic through the second PHY. - **/ + */ active_external_phy = EXT_PHY2; break; default: - /** + /* * Link indication on both PHYs with the following cases * is invalid: * - FIRST_PHY means that second phy wasn't initialized, @@ -3570,7 +3547,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) * - SECOND_PHY means that first phy should not be able * to link up by itself (using configuration) * - DEFAULT should be overriden during initialiazation - **/ + */ DP(NETIF_MSG_LINK, "Invalid link indication" "mpc=0x%x. DISABLING LINK !!!\n", params->multi_phy_config); @@ -3580,18 +3557,18 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) } } prev_line_speed = vars->line_speed; - /** - * Step 2: - * Read the status of the internal phy. In case of - * DIRECT_SINGLE_MEDIA board, this link is the external link, - * otherwise this is the link between the 577xx and the first - * external phy - */ + /* + * Step 2: + * Read the status of the internal phy. In case of + * DIRECT_SINGLE_MEDIA board, this link is the external link, + * otherwise this is the link between the 577xx and the first + * external phy + */ if (params->phy[INT_PHY].read_status) params->phy[INT_PHY].read_status( ¶ms->phy[INT_PHY], params, vars); - /** + /* * The INT_PHY flow control reside in the vars. This include the * case where the speed or flow control are not set to AUTO. * Otherwise, the active external phy flow control result is set @@ -3601,13 +3578,13 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) */ if (active_external_phy > INT_PHY) { vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; - /** + /* * Link speed is taken from the XGXS. AN and FC result from * the external phy. */ vars->link_status |= phy_vars[active_external_phy].link_status; - /** + /* * if active_external_phy is first PHY and link is up - disable * disable TX on second external PHY */ @@ -3643,7 +3620,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," " ext_phy_line_speed = %d\n", vars->flow_ctrl, vars->link_status, ext_phy_line_speed); - /** + /* * Upon link speed change set the NIG into drain mode. 
Comes to * deals with possible FIFO glitch due to clk change when speed * is decreased without link down indicator @@ -3658,8 +3635,8 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) ext_phy_line_speed); vars->phy_link_up = 0; } else if (prev_line_speed != vars->line_speed) { - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE - + params->port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, + 0); msleep(1); } } @@ -3674,14 +3651,14 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) bnx2x_link_int_ack(params, vars, link_10g); - /** - * In case external phy link is up, and internal link is down - * (not initialized yet probably after link initialization, it - * needs to be initialized. - * Note that after link down-up as result of cable plug, the xgxs - * link would probably become up again without the need - * initialize it - */ + /* + * In case external phy link is up, and internal link is down + * (not initialized yet probably after link initialization, it + * needs to be initialized. + * Note that after link down-up as result of cable plug, the xgxs + * link would probably become up again without the need + * initialize it + */ if (!(SINGLE_MEDIA_DIRECT(params))) { DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d," " init_preceding = %d\n", ext_phy_link_up, @@ -3701,9 +3678,9 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) vars); } } - /** - * Link is up only if both local phy and external phy (in case of - * non-direct board) are up + /* + * Link is up only if both local phy and external phy (in case of + * non-direct board) are up */ vars->link_up = (vars->phy_link_up && (ext_phy_link_up || @@ -3724,10 +3701,10 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars) void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) { bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); msleep(1); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); } static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port, @@ -3747,9 +3724,9 @@ static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp, u16 fw_ver1, fw_ver2; bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER1, &fw_ver1); + MDIO_PMA_REG_ROM_VER1, &fw_ver1); bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, &fw_ver2); + MDIO_PMA_REG_ROM_VER2, &fw_ver2); bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr); } @@ -3770,7 +3747,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) == MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) { - val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; + val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC; } if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) == @@ -3801,11 +3778,11 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { ret = 1; bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_ADV_PAUSE, &ld_pause); + MDIO_AN_DEVAD, + MDIO_AN_REG_ADV_PAUSE, &ld_pause); bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); + MDIO_AN_DEVAD, + MDIO_AN_REG_LP_AUTO_NEG, &lp_pause); pause_result = (ld_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; pause_result |= (lp_pause & @@ -3881,31 +3858,31 @@ static u8 
bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, /* Boot port from external ROM */ /* EDC grst */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - 0x0001); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x0001); /* ucode reboot and rst */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - 0x008c); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + 0x008c); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0001); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); /* Reset internal microprocessor */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); /* Release srst bit */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); /* Delay 100ms per the PHY specifications */ msleep(100); @@ -3936,8 +3913,8 @@ static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, /* Clear ser_boot_ctl bit */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0000); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); bnx2x_save_bcm_spirom_ver(bp, phy, port); DP(NETIF_MSG_LINK, @@ -3958,8 +3935,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) /* Read 8073 HW revision*/ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_CHIP_REV, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); if (val != 1) { /* No need to workaround in 8073 A1 */ @@ -3967,8 +3944,8 @@ static u8 bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy) } bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, &val); /* SNR should be applied only for version 0x102 */ if (val != 0x102) @@ -3982,8 +3959,8 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) u16 val, cnt, cnt1 ; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_CHIP_REV, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_CHIP_REV, &val); if (val > 0) { /* No need to workaround in 8073 A1 */ @@ -3991,26 +3968,32 @@ static u8 bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) } /* XAUI workaround in 8073 A0: */ - /* After loading the boot ROM and restarting Autoneg, - poll Dev1, Reg $C820: */ + /* + * After loading the boot ROM and restarting Autoneg, poll + * Dev1, Reg $C820: + */ for (cnt = 0; cnt < 1000; cnt++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8073_SPEED_LINK_STATUS, - &val); - /* If bit [14] = 0 or bit [13] = 0, continue on with - system initialization (XAUI work-around not required, - as these bits indicate 2.5G or 1G link up). */ + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8073_SPEED_LINK_STATUS, + &val); + /* + * If bit [14] = 0 or bit [13] = 0, continue on with + * system initialization (XAUI work-around not required, as + * these bits indicate 2.5G or 1G link up). 
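The 8073 A0 XAUI work-around described here is a two-stage poll: read the speed/link status register until a lower-speed link is indicated by bits 13/14, and if bit 15 drops instead, poll a second register until its MSB rises to signal completion. A condensed standalone sketch of that loop structure; cl45_read() is a stub, and the 0xC820/0xC841 register numbers are the ones quoted in the comments.

#include <stdint.h>
#include <stdio.h>

/* Stub for a clause-45 read on device 1 (PMA/PMD); reports a 1G/2.5G link. */
static uint16_t cl45_read(int devad, uint16_t reg)
{
        (void)devad; (void)reg;
        return (1 << 15) | (1 << 14);   /* bit 13 clear -> low-speed link */
}

/* Returns 0 when the work-around is not needed or has completed, else -1. */
static int xaui_workaround(void)
{
        uint16_t val;
        int i, j;

        for (i = 0; i < 1000; i++) {
                val = cl45_read(1, 0xc820);

                /* Bits 14/13 clear mean 2.5G or 1G link: nothing to do. */
                if (!(val & (1 << 14)) || !(val & (1 << 13)))
                        return 0;

                /* Bit 15 dropped: wait for the work-around to complete. */
                if (!(val & (1 << 15))) {
                        for (j = 0; j < 1000; j++) {
                                val = cl45_read(1, 0xc841);
                                if (val & (1 << 15))
                                        return 0;
                        }
                        return -1;      /* never completed */
                }
        }
        return -1;
}

int main(void)
{
        printf("xaui work-around rc = %d\n", xaui_workaround());
        return 0;
}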
+ */ if (!(val & (1<<14)) || !(val & (1<<13))) { DP(NETIF_MSG_LINK, "XAUI work-around not required\n"); return 0; } else if (!(val & (1<<15))) { - DP(NETIF_MSG_LINK, "clc bit 15 went off\n"); - /* If bit 15 is 0, then poll Dev1, Reg $C841 until - it's MSB (bit 15) goes to 1 (indicating that the - XAUI workaround has completed), - then continue on with system initialization.*/ + DP(NETIF_MSG_LINK, "bit 15 went off\n"); + /* + * If bit 15 is 0, then poll Dev1, Reg $C841 until it's + * MSB (bit15) goes to 1 (indicating that the XAUI + * workaround has completed), then continue on with + * system initialization. + */ for (cnt1 = 0; cnt1 < 1000; cnt1++) { bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, @@ -4093,10 +4076,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, gpio_port = params->port; /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); /* enable LASI */ bnx2x_cl45_write(bp, phy, @@ -4114,10 +4097,6 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); - /** - * If this is forced speed, set to KR or KX (all other are not - * supported) - */ /* Swap polarity if required - Must be done only in non-1G mode */ if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) { /* Configure the 8073 to swap _P and _N of the KR lines */ @@ -4160,8 +4139,10 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, val = (1<<7); } else if (phy->req_line_speed == SPEED_2500) { val = (1<<5); - /* Note that 2.5G works only - when used with 1G advertisment */ + /* + * Note that 2.5G works only when used with 1G + * advertisment + */ } else val = (1<<5); } else { @@ -4170,8 +4151,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) val |= (1<<7); - /* Note that 2.5G works only when - used with 1G advertisment */ + /* Note that 2.5G works only when used with 1G advertisment */ if (phy->speed_cap_mask & (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G | PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)) @@ -4211,9 +4191,11 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, /* Add support for CL37 (passive mode) III */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); - /* The SNR will improve about 2db by changing - BW and FEE main tap. Rest commands are executed - after link is up*/ + /* + * The SNR will improve about 2db by changing BW and FEE main + * tap. Rest commands are executed after link is up + * Change FFE main cursor to 5 in EDC register + */ if (bnx2x_8073_is_snr_needed(bp, phy)) bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN, @@ -4297,12 +4279,11 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { - /* The SNR will improve about 2dbby - changing the BW and FEE main tap.*/ - /* The 1st write to change FFE main - tap is set before restart AN */ - /* Change PLL Bandwidth in EDC - register */ + /* + * The SNR will improve about 2dbby changing the BW and FEE main + * tap. The 1st write to change FFE main tap is set before + * restart AN. 
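Both SNR-related writes (the FFE main tap before autoneg is restarted, and PLL bandwidth 0x26BC once the link is up) are gated on bnx2x_8073_is_snr_needed(), which only passes for one chip revision and firmware version read back over clause 45. A trivial standalone restatement of that gate with the register reads stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for clause-45 reads of CHIP_REV and ROM_VER2. */
static unsigned int read_chip_rev(void)
{
        return 1;
}

static unsigned int read_rom_ver2(void)
{
        return 0x102;
}

/*
 * The SNR tuning (FFE main tap before restarting autoneg, PLL bandwidth
 * 0x26BC once the link is up) is applied only for this one revision /
 * firmware combination; every other 8073 keeps the defaults.
 */
static bool snr_tuning_needed(void)
{
        if (read_chip_rev() != 1)
                return false;
        return read_rom_ver2() == 0x102;
}

int main(void)
{
        printf("apply SNR tuning: %s\n", snr_tuning_needed() ? "yes" : "no");
        return 0;
}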
Change PLL Bandwidth in EDC register + */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH, 0x26BC); @@ -4346,10 +4327,10 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); - /** - * Set bit 3 to invert Rx in 1G mode and clear this bit - * when it`s in 10G mode. - */ + /* + * Set bit 3 to invert Rx in 1G mode and clear this bit + * when it`s in 10G mode. + */ if (vars->line_speed == SPEED_1000) { DP(NETIF_MSG_LINK, "Swapping 1G polarity for" "the 8073\n"); @@ -4381,8 +4362,8 @@ static void bnx2x_8073_link_reset(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n", gpio_port); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - gpio_port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, + gpio_port); } /******************************************************************/ @@ -4396,11 +4377,11 @@ static u8 bnx2x_8705_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "init 8705\n"); /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); - bnx2x_wait_reset_complete(bp, phy); + bnx2x_wait_reset_complete(bp, phy, params); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288); @@ -4451,35 +4432,79 @@ static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy, /******************************************************************/ /* SFP+ module Section */ /******************************************************************/ -static void bnx2x_sfp_set_transmitter(struct bnx2x *bp, +static u8 bnx2x_get_gpio_port(struct link_params *params) +{ + u8 gpio_port; + u32 swap_val, swap_override; + struct bnx2x *bp = params->bp; + if (CHIP_IS_E2(bp)) + gpio_port = BP_PATH(bp); + else + gpio_port = params->port; + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + return gpio_port ^ (swap_val && swap_override); +} +static void bnx2x_sfp_set_transmitter(struct link_params *params, struct bnx2x_phy *phy, - u8 port, u8 tx_en) { u16 val; + u8 port = params->port; + struct bnx2x *bp = params->bp; + u32 tx_en_mode; - DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n", - tx_en, port); /* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/ - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - &val); + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[port].sfp_ctrl)) & + PORT_HW_CFG_TX_LASER_MASK; + DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x " + "mode = %x\n", tx_en, port, tx_en_mode); + switch (tx_en_mode) { + case PORT_HW_CFG_TX_LASER_MDIO: - if (tx_en) - val &= ~(1<<15); - else - val |= (1<<15); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &val); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - val); + if (tx_en) + val &= ~(1<<15); + else + val |= (1<<15); + + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + val); + break; + case PORT_HW_CFG_TX_LASER_GPIO0: + case PORT_HW_CFG_TX_LASER_GPIO1: + case PORT_HW_CFG_TX_LASER_GPIO2: + case PORT_HW_CFG_TX_LASER_GPIO3: + { + u16 gpio_pin; + u8 gpio_port, gpio_mode; + if (tx_en) + gpio_mode = 
MISC_REGISTERS_GPIO_OUTPUT_HIGH; + else + gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW; + + gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0; + gpio_port = bnx2x_get_gpio_port(params); + bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); + break; + } + default: + DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode); + break; + } } static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) + u16 addr, u8 byte_cnt, u8 *o_buf) { struct bnx2x *bp = params->bp; u16 val = 0; @@ -4492,23 +4517,23 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Set the read command byte count */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, - (byte_cnt | 0xa000)); + (byte_cnt | 0xa000)); /* Set the read command address */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, - addr); + addr); /* Activate read command */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - 0x2c0f); + 0x2c0f); /* Wait up to 500us for command complete status */ for (i = 0; i < 100; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) break; @@ -4526,15 +4551,15 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Read the buffer */ for (i = 0; i < byte_cnt; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val); o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK); } for (i = 0; i < 100; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; @@ -4545,7 +4570,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, - u16 addr, u8 byte_cnt, u8 *o_buf) + u16 addr, u8 byte_cnt, u8 *o_buf) { struct bnx2x *bp = params->bp; u16 val, i; @@ -4558,41 +4583,43 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Need to read from 1.8000 to clear it */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + &val); /* Set the read command byte count */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, - ((byte_cnt < 2) ? 2 : byte_cnt)); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT, + ((byte_cnt < 2) ? 
2 : byte_cnt)); /* Set the read command address */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, - addr); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR, + addr); /* Set the destination address */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - 0x8004, - MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); + MDIO_PMA_DEVAD, + 0x8004, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF); /* Activate read command */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, - 0x8002); - /* Wait appropriate time for two-wire command to finish before - polling the status register */ + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, + 0x8002); + /* + * Wait appropriate time for two-wire command to finish before + * polling the status register + */ msleep(1); /* Wait up to 500us for command complete status */ for (i = 0; i < 100; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) break; @@ -4604,21 +4631,21 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Got bad status 0x%x when reading from SFP+ EEPROM\n", (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK)); - return -EINVAL; + return -EFAULT; } /* Read the buffer */ for (i = 0; i < byte_cnt; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val); o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK); } for (i = 0; i < 100; i++) { bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; @@ -4628,22 +4655,22 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, return -EINVAL; } -static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, - struct link_params *params, u16 addr, - u8 byte_cnt, u8 *o_buf) +u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u16 addr, + u8 byte_cnt, u8 *o_buf) { if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); + byte_cnt, o_buf); else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr, - byte_cnt, o_buf); + byte_cnt, o_buf); return -EINVAL; } static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, struct link_params *params, - u16 *edc_mode) + u16 *edc_mode) { struct bnx2x *bp = params->bp; u8 val, check_limiting_mode = 0; @@ -4664,8 +4691,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, { u8 copper_module_type; - /* Check if its active cable( includes SFP+ module) - of passive cable*/ + /* + * Check if its active cable (includes SFP+ module) + * of passive cable + */ if (bnx2x_read_sfp_module_eeprom(phy, params, SFP_EEPROM_FC_TX_TECH_ADDR, @@ -4724,8 +4753,10 @@ static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); return 0; } -/* This function read the relevant field from the module ( SFP+ ), - and verify it is compliant with this board */ +/* + * This function read the relevant field from the module (SFP+), and verify it + * is compliant with this board + */ static u8 
bnx2x_verify_sfp_module(struct bnx2x_phy *phy, struct link_params *params) { @@ -4774,24 +4805,24 @@ static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy, /* format the warning message */ if (bnx2x_read_sfp_module_eeprom(phy, params, - SFP_EEPROM_VENDOR_NAME_ADDR, - SFP_EEPROM_VENDOR_NAME_SIZE, - (u8 *)vendor_name)) + SFP_EEPROM_VENDOR_NAME_ADDR, + SFP_EEPROM_VENDOR_NAME_SIZE, + (u8 *)vendor_name)) vendor_name[0] = '\0'; else vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0'; if (bnx2x_read_sfp_module_eeprom(phy, params, - SFP_EEPROM_PART_NO_ADDR, - SFP_EEPROM_PART_NO_SIZE, - (u8 *)vendor_pn)) + SFP_EEPROM_PART_NO_ADDR, + SFP_EEPROM_PART_NO_SIZE, + (u8 *)vendor_pn)) vendor_pn[0] = '\0'; else vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0'; - netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected," - " Port %d from %s part number %s\n", - params->port, vendor_name, vendor_pn); + netdev_err(bp->dev, "Warning: Unqualified SFP+ module detected," + " Port %d from %s part number %s\n", + params->port, vendor_name, vendor_pn); phy->flags |= FLAGS_SFP_NOT_APPROVED; return -EINVAL; } @@ -4803,8 +4834,11 @@ static u8 bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, u8 val; struct bnx2x *bp = params->bp; u16 timeout; - /* Initialization time after hot-plug may take up to 300ms for some - phys type ( e.g. JDSU ) */ + /* + * Initialization time after hot-plug may take up to 300ms for + * some phys type ( e.g. JDSU ) + */ + for (timeout = 0; timeout < 60; timeout++) { if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val) == 0) { @@ -4823,16 +4857,14 @@ static void bnx2x_8727_power_module(struct bnx2x *bp, /* Make sure GPIOs are not using for LED mode */ u16 val; /* - * In the GPIO register, bit 4 is use to detemine if the GPIOs are + * In the GPIO register, bit 4 is use to determine if the GPIOs are * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for * output * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1 * where the 1st bit is the over-current(only input), and 2nd bit is * for power( only output ) - */ - - /* + * * In case of NOC feature is disabled and power is up, set GPIO control * as input to enable listening of over-current indication */ @@ -4861,15 +4893,14 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp, u16 cur_limiting_mode; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - &cur_limiting_mode); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + &cur_limiting_mode); DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n", cur_limiting_mode); if (edc_mode == EDC_MODE_LIMITING) { - DP(NETIF_MSG_LINK, - "Setting LIMITING MODE\n"); + DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n"); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER2, @@ -4878,62 +4909,63 @@ static u8 bnx2x_8726_set_limiting_mode(struct bnx2x *bp, DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); - /* Changing to LRM mode takes quite few seconds. - So do it only if current mode is limiting - ( default is LRM )*/ + /* + * Changing to LRM mode takes quite few seconds. 
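Because the LRM switch is slow, bnx2x_8726_set_limiting_mode() first reads the current mode and performs the long register sequence only when the PHY is actually sitting in limiting mode. The sketch below mirrors that structure; the register numbers and the EDC_MODE_* values are illustrative placeholders, while the written values (0, 0x128, 0x4008, 0xaaaa) are the ones visible in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only, not the driver's EDC_MODE_* definitions. */
#define EDC_MODE_LIMITING       0x0044
#define EDC_MODE_LRM            0x0006

/* Placeholder register numbers; the real driver uses MDIO_PMA_REG_*. */
#define REG_ROM_VER2            0xca19
#define REG_LRM_MODE            0xca1a
#define REG_MISC_CTRL0          0xca1b

static uint16_t cl45_read(uint16_t reg)
{
        (void)reg;
        return EDC_MODE_LIMITING;       /* pretend the PHY is in limiting mode */
}

static void cl45_write(uint16_t reg, uint16_t val)
{
        printf("reg 0x%04x <- 0x%04x\n", reg, val);
}

static int set_8726_edc_mode(uint16_t edc_mode)
{
        uint16_t cur = cl45_read(REG_ROM_VER2);

        if (edc_mode == EDC_MODE_LIMITING) {
                cl45_write(REG_ROM_VER2, EDC_MODE_LIMITING);
                return 0;
        }

        /*
         * LRM requested: the switch takes a few seconds, so skip the whole
         * sequence unless the PHY is currently in limiting mode (LRM is
         * the power-on default).
         */
        if (cur != EDC_MODE_LIMITING)
                return 0;

        cl45_write(REG_LRM_MODE, 0);
        cl45_write(REG_ROM_VER2, 0x128);
        cl45_write(REG_MISC_CTRL0, 0x4008);
        cl45_write(REG_LRM_MODE, 0xaaaa);
        return 0;
}

int main(void)
{
        return set_8726_edc_mode(EDC_MODE_LRM);
}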
So do it only + * if current mode is limiting (default is LRM) + */ if (cur_limiting_mode != EDC_MODE_LIMITING) return 0; bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_LRM_MODE, - 0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LRM_MODE, + 0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - 0x128); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + 0x128); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL0, - 0x4008); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL0, + 0x4008); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_LRM_MODE, - 0xaaaa); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_LRM_MODE, + 0xaaaa); } return 0; } static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp, struct bnx2x_phy *phy, - u16 edc_mode) + u16 edc_mode) { u16 phy_identifier; u16 rom_ver2_val; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - &phy_identifier); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + &phy_identifier); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - (phy_identifier & ~(1<<9))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier & ~(1<<9))); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - &rom_ver2_val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + &rom_ver2_val); /* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_ROM_VER2, - (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_ROM_VER2, + (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff)); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, - (phy_identifier | (1<<9))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, + (phy_identifier | (1<<9))); return 0; } @@ -4946,11 +4978,11 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, switch (action) { case DISABLE_TX: - bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); + bnx2x_sfp_set_transmitter(params, phy, 0); break; case ENABLE_TX: if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) - bnx2x_sfp_set_transmitter(bp, phy, params->port, 1); + bnx2x_sfp_set_transmitter(params, phy, 1); break; default: DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", @@ -4959,6 +4991,38 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, } } +static void bnx2x_set_sfp_module_fault_led(struct link_params *params, + u8 gpio_mode) +{ + struct bnx2x *bp = params->bp; + + u32 fault_led_gpio = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) & + PORT_HW_CFG_FAULT_MODULE_LED_MASK; + switch (fault_led_gpio) { + case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED: + return; + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2: + case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3: + { + u8 gpio_port = bnx2x_get_gpio_port(params); + u16 gpio_pin = fault_led_gpio - + PORT_HW_CFG_FAULT_MODULE_LED_GPIO0; + DP(NETIF_MSG_LINK, "Set fault module-detected led " + "pin %x port %x mode %x\n", + gpio_pin, gpio_port, gpio_mode); + bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port); + } + break; + default: + DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n", + fault_led_gpio); + } +} + static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, struct link_params *params) { @@ -4976,15 +5040,14 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) { DP(NETIF_MSG_LINK, 
"Failed to get valid module type\n"); return -EINVAL; - } else if (bnx2x_verify_sfp_module(phy, params) != - 0) { + } else if (bnx2x_verify_sfp_module(phy, params) != 0) { /* check SFP+ module compatibility */ DP(NETIF_MSG_LINK, "Module verification failed!!\n"); rc = -EINVAL; /* Turn on fault module-detected led */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_HIGH, - params->port); + bnx2x_set_sfp_module_fault_led(params, + MISC_REGISTERS_GPIO_HIGH); + if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) && ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) { @@ -4995,18 +5058,17 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, } } else { /* Turn off fault module-detected led */ - DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n"); - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_LOW, - params->port); + bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); } /* power up the SFP module */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) bnx2x_8727_power_module(bp, phy, 1); - /* Check and set limiting mode / LRM mode on 8726. - On 8727 it is done automatically */ + /* + * Check and set limiting mode / LRM mode on 8726. On 8727 it + * is done automatically + */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) bnx2x_8726_set_limiting_mode(bp, phy, edc_mode); else @@ -5018,9 +5080,9 @@ static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy, if (rc == 0 || (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) != PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) - bnx2x_sfp_set_transmitter(bp, phy, params->port, 1); + bnx2x_sfp_set_transmitter(params, phy, 1); else - bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); + bnx2x_sfp_set_transmitter(params, phy, 0); return rc; } @@ -5033,11 +5095,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params) u8 port = params->port; /* Set valid module led off */ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_HIGH, - params->port); + bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH); - /* Get current gpio val refelecting module plugged in / out*/ + /* Get current gpio val reflecting module plugged in / out*/ gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port); /* Call the handling function in case module is detected */ @@ -5053,18 +5113,20 @@ void bnx2x_handle_module_detect_int(struct link_params *params) DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); } else { u32 val = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port]. - config)); + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. + config)); bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3, MISC_REGISTERS_GPIO_INT_OUTPUT_SET, port); - /* Module was plugged out. */ - /* Disable transmit for this module */ + /* + * Module was plugged out. 
+ * Disable transmit for this module + */ if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) - bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); + bnx2x_sfp_set_transmitter(params, phy, 0); } } @@ -5100,9 +5162,9 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" " link_status 0x%x\n", rx_sd, pcs_status, val2); - /* link is up if both bit 0 of pmd_rx_sd and - * bit 0 of pcs_status are set, or if the autoneg bit - * 1 is set + /* + * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status + * are set, or if the autoneg bit 1 is set */ link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); if (link_up) { @@ -5123,14 +5185,15 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u16 cnt, val; + u32 tx_en_mode; + u16 cnt, val, tmp1; struct bnx2x *bp = params->bp; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040); - bnx2x_wait_reset_complete(bp, phy); + bnx2x_wait_reset_complete(bp, phy, params); /* Wait until fw is loaded */ for (cnt = 0; cnt < 100; cnt++) { @@ -5197,6 +5260,26 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, 0x0004); } bnx2x_save_bcm_spirom_ver(bp, phy, params->port); + + /* + * If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1); + tmp1 |= 0x1; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1); + } + return 0; } @@ -5231,26 +5314,26 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy, /* Set soft reset */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0001); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0001); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_GEN_CTRL, - MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_GEN_CTRL, + MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); /* wait for 150ms for microcode load */ msleep(150); /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_MISC_CTRL1, 0x0000); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_MISC_CTRL1, 0x0000); msleep(200); bnx2x_save_bcm_spirom_ver(bp, phy, params->port); @@ -5285,23 +5368,18 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, u32 val; u32 swap_val, swap_override, aeu_gpio_mask, offset; DP(NETIF_MSG_LINK, "Initializing BCM8726\n"); - /* Restore normal power mode*/ - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); - - bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 
1<<15); - bnx2x_wait_reset_complete(bp, phy); + bnx2x_wait_reset_complete(bp, phy, params); bnx2x_8726_external_rom_boot(phy, params); - /* Need to call module detected on initialization since - the module detection triggered by actual module - insertion might occur before driver is loaded, and when - driver is loaded, it reset all registers, including the - transmitter */ + /* + * Need to call module detected on initialization since the module + * detection triggered by actual module insertion might occur before + * driver is loaded, and when driver is loaded, it reset all + * registers, including the transmitter + */ bnx2x_sfp_module_detection(phy, params); if (phy->req_line_speed == SPEED_1000) { @@ -5334,8 +5412,10 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); - /* Enable RX-ALARM control to receive - interrupt for 1G speed change */ + /* + * Enable RX-ALARM control to receive interrupt for 1G speed + * change + */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4); bnx2x_cl45_write(bp, phy, @@ -5367,7 +5447,7 @@ static u8 bnx2x_8726_config_init(struct bnx2x_phy *phy, /* Set GPIO3 to trigger SFP+ module insertion/removal */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port); + MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port); /* The GPIO should be swapped if the swap register is set and active */ swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); @@ -5458,7 +5538,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { u32 swap_val, swap_override; u8 port; - /** + /* * The PHY reset is controlled by GPIO 1. Fake the port number * to cancel the swap done in set_gpio() */ @@ -5467,20 +5547,21 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); port = (swap_val && swap_override) ^ 1; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); } static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u16 tmp1, val, mod_abs; + u32 tx_en_mode; + u16 tmp1, val, mod_abs, tmp2; u16 rx_alarm_ctrl_val; u16 lasi_ctrl_val; struct bnx2x *bp = params->bp; /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ - bnx2x_wait_reset_complete(bp, phy); + bnx2x_wait_reset_complete(bp, phy, params); rx_alarm_ctrl_val = (1<<2) | (1<<5) ; lasi_ctrl_val = 0x0004; @@ -5493,14 +5574,17 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val); - /* Initially configure MOD_ABS to interrupt when - module is presence( bit 8) */ + /* + * Initially configure MOD_ABS to interrupt when module is + * presence( bit 8) + */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); - /* Set EDC off by setting OPTXLOS signal input to low - (bit 9). - When the EDC is off it locks onto a reference clock and - avoids becoming 'lost'.*/ + /* + * Set EDC off by setting OPTXLOS signal input to low (bit 9). 
+ * When the EDC is off it locks onto a reference clock and avoids + * becoming 'lost' + */ mod_abs &= ~(1<<8); if (!(phy->flags & FLAGS_NOC)) mod_abs &= ~(1<<9); @@ -5515,7 +5599,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, if (phy->flags & FLAGS_NOC) val |= (3<<5); - /** + /* * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 * status which reflect SFP+ module over-current */ @@ -5542,7 +5626,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); - /** + /* * Power down the XAUI until link is up in case of dual-media * and 1G */ @@ -5568,7 +5652,7 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); } else { - /** + /* * Since the 8727 has only single reset pin, need to set the 10G * registers although it is default */ @@ -5584,7 +5668,8 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, 0x0008); } - /* Set 2-wire transfer rate of SFP+ module EEPROM + /* + * Set 2-wire transfer rate of SFP+ module EEPROM * to 100Khz since some DACs(direct attached cables) do * not work at 400Khz. */ @@ -5607,6 +5692,26 @@ static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy, phy->tx_preemphasis[1]); } + /* + * If TX Laser is controlled by GPIO_0, do not let PHY go into low + * power mode, if TX Laser is disabled + */ + tx_en_mode = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].sfp_ctrl)) + & PORT_HW_CFG_TX_LASER_MASK; + + if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) { + + DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n"); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2); + tmp2 |= 0x1000; + tmp2 &= 0xFFEF; + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2); + } + return 0; } @@ -5620,46 +5725,49 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, port_feature_config[params->port]. config)); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); if (mod_abs & (1<<8)) { /* Module is absent */ DP(NETIF_MSG_LINK, "MOD_ABS indication " "show module is absent\n"); - /* 1. Set mod_abs to detect next module - presence event - 2. Set EDC off by setting OPTXLOS signal input to low - (bit 9). - When the EDC is off it locks onto a reference clock and - avoids becoming 'lost'.*/ + /* + * 1. Set mod_abs to detect next module + * presence event + * 2. Set EDC off by setting OPTXLOS signal input to low + * (bit 9). + * When the EDC is off it locks onto a reference clock and + * avoids becoming 'lost'. + */ mod_abs &= ~(1<<8); if (!(phy->flags & FLAGS_NOC)) mod_abs &= ~(1<<9); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* Clear RX alarm since it stays up as long as - the mod_abs wasn't changed */ + /* + * Clear RX alarm since it stays up as long as + * the mod_abs wasn't changed + */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); } else { /* Module is present */ DP(NETIF_MSG_LINK, "MOD_ABS indication " "show module is present\n"); - /* First thing, disable transmitter, - and if the module is ok, the - module_detection will enable it*/ - - /* 1. 
Set mod_abs to detect next module - absent event ( bit 8) - 2. Restore the default polarity of the OPRXLOS signal and - this signal will then correctly indicate the presence or - absence of the Rx signal. (bit 9) */ + /* + * First disable transmitter, and if the module is ok, the + * module_detection will enable it + * 1. Set mod_abs to detect next module absent event ( bit 8) + * 2. Restore the default polarity of the OPRXLOS signal and + * this signal will then correctly indicate the presence or + * absence of the Rx signal. (bit 9) + */ mod_abs |= (1<<8); if (!(phy->flags & FLAGS_NOC)) mod_abs |= (1<<9); @@ -5667,10 +5775,12 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* Clear RX alarm since it stays up as long as - the mod_abs wasn't changed. This is need to be done - before calling the module detection, otherwise it will clear - the link update alarm */ + /* + * Clear RX alarm since it stays up as long as the mod_abs + * wasn't changed. This is need to be done before calling the + * module detection, otherwise it will clear* the link update + * alarm + */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM, &rx_alarm_status); @@ -5678,7 +5788,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) == PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) - bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); + bnx2x_sfp_set_transmitter(params, phy, 0); if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) bnx2x_sfp_module_detection(phy, params); @@ -5687,9 +5797,8 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, } DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", - rx_alarm_status); - /* No need to check link status in case of - module plugged in/out */ + rx_alarm_status); + /* No need to check link status in case of module plugged in/out */ } static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, @@ -5725,7 +5834,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); - /** + /* * If a module is present and there is need to check * for over current */ @@ -5745,12 +5854,8 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, " Please remove the SFP+ module and" " restart the system to clear this" " error.\n", - params->port); - - /* - * Disable all RX_ALARMs except for - * mod_abs - */ + params->port); + /* Disable all RX_ALARMs except for mod_abs */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5)); @@ -5793,11 +5898,15 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); - /* Bits 0..2 --> speed detected, - bits 13..15--> link is down */ + /* + * Bits 0..2 --> speed detected, + * Bits 13..15--> link is down + */ if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { link_up = 1; vars->line_speed = SPEED_10000; + DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n", + params->port); } else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) { link_up = 1; vars->line_speed = SPEED_1000; @@ -5819,7 +5928,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_GP, &val1); - /** + /* * In case of dual-media board and 1G, power up the XAUI side, * otherwise power it down. 
For 10G it is done automatically */ @@ -5839,7 +5948,7 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; /* Disable Transmitter */ - bnx2x_sfp_set_transmitter(bp, phy, params->port, 0); + bnx2x_sfp_set_transmitter(params, phy, 0); /* Clear LASI */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0); @@ -5851,19 +5960,23 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy, static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, struct link_params *params) { - u16 val, fw_ver1, fw_ver2, cnt; + u16 val, fw_ver1, fw_ver2, cnt, adj; struct bnx2x *bp = params->bp; + adj = 0; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + adj = -1; + /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/ /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009); for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val); if (val & 1) break; udelay(5); @@ -5877,11 +5990,11 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */ - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200); - bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A); for (cnt = 0; cnt < 100; cnt++) { - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val); if (val & 1) break; udelay(5); @@ -5894,9 +6007,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, } /* lower 16 bits of the register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1); /* upper 16 bits of register SPI_FW_STATUS */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2); + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2); bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1, phy->ver_addr); @@ -5905,49 +6018,53 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy, static void bnx2x_848xx_set_led(struct bnx2x *bp, struct bnx2x_phy *phy) { - u16 val; + u16 val, adj; + + adj = 0; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + adj = -1; /* PHYC_CTL_LED_CTL */ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, &val); + MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val); val &= 0xFE00; val |= 0x0092; bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, val); + MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val); bnx2x_cl45_write(bp, phy, 
MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, + MDIO_PMA_REG_8481_LED1_MASK + adj, 0x80); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, + MDIO_PMA_REG_8481_LED2_MASK + adj, 0x18); /* Select activity source by Tx and Rx, as suggested by PHY AE */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, + MDIO_PMA_REG_8481_LED3_MASK + adj, 0x0006); /* Select the closest activity blink rate to that in 10/100/1000 */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_BLINK, + MDIO_PMA_REG_8481_LED3_BLINK + adj, 0); bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val); + MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val); val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, - MDIO_PMA_REG_84823_CTL_LED_CTL_1, val); + MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val); /* 'Interrupt Mask' */ bnx2x_cl45_write(bp, phy, @@ -5961,7 +6078,11 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 autoneg_val, an_1000_val, an_10_100_val; - + /* + * This phy uses the NIG latch mechanism since link indication + * arrives through its LED4 and not via its LASI signal, so we + * get steady signal instead of clear on read + */ bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 1 << NIG_LATCH_BC_ENABLE_MI_INT); @@ -6086,11 +6207,11 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); - bnx2x_wait_reset_complete(bp, phy); + bnx2x_wait_reset_complete(bp, phy, params); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); return bnx2x_848xx_cmn_config_init(phy, params, vars); @@ -6102,12 +6223,15 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u8 port, initialize = 1; - u16 val; + u16 val, adj; u16 temp; - u32 actual_phy_selection; + u32 actual_phy_selection, cms_enable; u8 rc = 0; /* This is just for MDIO_CTL_REG_84823_MEDIA register. 
*/ + adj = 0; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + adj = 3; msleep(1); if (CHIP_IS_E2(bp)) @@ -6117,11 +6241,12 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); - bnx2x_wait_reset_complete(bp, phy); + bnx2x_wait_reset_complete(bp, phy, params); /* Wait for GPHY to come out of reset */ msleep(50); - /* BCM84823 requires that XGXS links up first @ 10G for normal - behavior */ + /* + * BCM84823 requires that XGXS links up first @ 10G for normal behavior + */ temp = vars->line_speed; vars->line_speed = SPEED_10000; bnx2x_set_autoneg(¶ms->phy[INT_PHY], params, vars, 0); @@ -6131,7 +6256,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* Set dual-media configuration according to configuration */ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_MEDIA, &val); + MDIO_CTL_REG_84823_MEDIA + adj, &val); val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK | MDIO_CTL_REG_84823_MEDIA_LINE_MASK | MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN | @@ -6164,7 +6289,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G; bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, - MDIO_CTL_REG_84823_MEDIA, val); + MDIO_CTL_REG_84823_MEDIA + adj, val); DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n", params->multi_phy_config, val); @@ -6172,23 +6297,43 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, rc = bnx2x_848xx_cmn_config_init(phy, params, vars); else bnx2x_save_848xx_spirom_version(phy, params); + cms_enable = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_CMS_MASK; + + bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, &val); + if (cms_enable) + val |= MDIO_CTL_REG_84823_USER_CTRL_CMS; + else + val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS; + bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, + MDIO_CTL_REG_84823_USER_CTRL_REG, val); + + return rc; } static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) + struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 val, val1, val2; + u16 val, val1, val2, adj; u8 link_up = 0; + /* Reg offset adjustment for 84833 */ + adj = 0; + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + adj = -1; + /* Check 10G-BaseT link status */ /* Check PMD signal ok */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, 0xFFFA, &val1); bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj, &val2); DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2); @@ -6273,9 +6418,9 @@ static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); + MISC_REGISTERS_GPIO_OUTPUT_LOW, 0); bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); + MISC_REGISTERS_GPIO_OUTPUT_LOW, 1); } static void bnx2x_8481_link_reset(struct bnx2x_phy *phy, @@ -6297,8 +6442,8 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, else port = params->port; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, - MISC_REGISTERS_GPIO_OUTPUT_LOW, - port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); } static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, @@ -6353,24 +6498,24 @@ static void 
bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, /* Set LED masks */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x20); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x20); } else { bnx2x_cl45_write(bp, phy, @@ -6394,35 +6539,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, val |= 0x2492; bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LINK_SIGNAL, - val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LINK_SIGNAL, + val); /* Set LED masks */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x0); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x20); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x20); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x20); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x20); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x0); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x0); } else { bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x20); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x20); } break; @@ -6440,9 +6585,9 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, &val); if (!((val & - MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) - >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){ - DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n"); + MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK) + >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) { + DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n"); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LINK_SIGNAL, @@ -6451,24 +6596,24 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, /* Set LED masks */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED1_MASK, - 0x10); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED1_MASK, + 0x10); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED2_MASK, - 0x80); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED2_MASK, + 0x80); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED3_MASK, - 0x98); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED3_MASK, + 0x98); bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8481_LED5_MASK, - 0x40); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8481_LED5_MASK, + 0x40); } else { bnx2x_cl45_write(bp, phy, @@ -6513,10 +6658,10 @@ static u8 bnx2x_7101_config_init(struct bnx2x_phy *phy, /* Restore normal power mode*/ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port); /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); - bnx2x_wait_reset_complete(bp, phy); + bnx2x_wait_reset_complete(bp, phy, params); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1); @@ -6563,9 +6708,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", val2, val1); link_up = ((val1 & 4) == 4); - /* if link is up - * print the AN outcome of the SFX7101 PHY - */ + 
/* if link is up print the AN outcome of the SFX7101 PHY */ if (link_up) { bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, @@ -6599,20 +6742,20 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy) u16 val, cnt; bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); for (cnt = 0; cnt < 10; cnt++) { msleep(50); /* Writes a self-clearing reset */ bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, - (val | (1<<15))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, + (val | (1<<15))); /* Wait for clear */ bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_7101_RESET, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_7101_RESET, &val); if ((val & (1<<15)) == 0) break; @@ -6623,10 +6766,10 @@ static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy, struct link_params *params) { /* Low power mode is controlled by GPIO 2 */ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); /* The PHY reset is controlled by GPIO 1 */ bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1, - MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port); } static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy, @@ -6668,9 +6811,9 @@ static struct bnx2x_phy phy_null = { .supported = 0, .media_type = ETH_PHY_NOT_PRESENT, .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)NULL, @@ -6705,8 +6848,8 @@ static struct bnx2x_phy phy_serdes = { .media_type = ETH_PHY_UNSPECIFIED, .ver_addr = 0, .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)bnx2x_init_serdes, @@ -6742,8 +6885,8 @@ static struct bnx2x_phy phy_xgxs = { .media_type = ETH_PHY_UNSPECIFIED, .ver_addr = 0, .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)bnx2x_init_xgxs, @@ -6773,8 +6916,8 @@ static struct bnx2x_phy phy_7101 = { .media_type = ETH_PHY_BASE_T, .ver_addr = 0, .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)bnx2x_7101_config_init, @@ -6804,9 +6947,9 @@ static struct bnx2x_phy phy_8073 = { SUPPORTED_Asym_Pause), .media_type = ETH_PHY_UNSPECIFIED, .ver_addr = 0, - .req_flow_ctrl = 0, - .req_line_speed = 0, - .speed_cap_mask = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, .config_init = (config_init_t)bnx2x_8073_config_init, @@ -7015,6 +7158,43 @@ static struct bnx2x_phy phy_84823 = { .phy_specific_func = (phy_specific_func_t)NULL }; +static struct bnx2x_phy phy_84833 = { + .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, + .addr = 0xff, + .flags = FLAGS_FAN_FAILURE_DET_REQ | + FLAGS_REARM_LATCH_SIGNAL, + .def_md_devad = 0, + .reserved = 0, + .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, + .mdio_ctrl = 0, + .supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseT_Full | 
+ SUPPORTED_TP | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .media_type = ETH_PHY_BASE_T, + .ver_addr = 0, + .req_flow_ctrl = 0, + .req_line_speed = 0, + .speed_cap_mask = 0, + .req_duplex = 0, + .rsrv = 0, + .config_init = (config_init_t)bnx2x_848x3_config_init, + .read_status = (read_status_t)bnx2x_848xx_read_status, + .link_reset = (link_reset_t)bnx2x_848x3_link_reset, + .config_loopback = (config_loopback_t)NULL, + .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, + .hw_reset = (hw_reset_t)NULL, + .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, + .phy_specific_func = (phy_specific_func_t)NULL +}; + /*****************************************************************/ /* */ /* Populate the phy according. Main function: bnx2x_populate_phy */ @@ -7028,7 +7208,7 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, /* Get the 4 lanes xgxs config rx and tx */ u32 rx = 0, tx = 0, i; for (i = 0; i < 2; i++) { - /** + /* * INT_PHY and EXT_PHY1 share the same value location in the * shmem. When num_phys is greater than 1, than this value * applies only to EXT_PHY1 @@ -7036,19 +7216,19 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base, if (phy_index == INT_PHY || phy_index == EXT_PHY1) { rx = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); + dev_info.port_hw_config[port].xgxs_config_rx[i<<1])); tx = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); + dev_info.port_hw_config[port].xgxs_config_tx[i<<1])); } else { rx = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); + dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); tx = REG_RD(bp, shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); + dev_info.port_hw_config[port].xgxs_config2_rx[i<<1])); } phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff); @@ -7168,6 +7348,9 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823: *phy = phy_84823; break; + case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: + *phy = phy_84833; + break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: *phy = phy_7101; break; @@ -7182,21 +7365,21 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); - /** - * The shmem address of the phy version is located on different - * structures. In case this structure is too old, do not set - * the address - */ + /* + * The shmem address of the phy version is located on different + * structures. 
In case this structure is too old, do not set + * the address + */ config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region, dev_info.shared_hw_config.config2)); if (phy_index == EXT_PHY1) { phy->ver_addr = shmem_base + offsetof(struct shmem_region, port_mb[port].ext_phy_fw_version); - /* Check specific mdc mdio settings */ - if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) - mdc_mdio_access = config2 & - SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; + /* Check specific mdc mdio settings */ + if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK) + mdc_mdio_access = config2 & + SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK; } else { u32 size = REG_RD(bp, shmem2_base); @@ -7215,7 +7398,7 @@ static u8 bnx2x_populate_ext_phy(struct bnx2x *bp, } phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port); - /** + /* * In case mdc/mdio_access of the external phy is different than the * mdc/mdio access of the XGXS, a HW lock must be taken in each access * to prevent one port interfere with another port's CL45 operations. @@ -7250,18 +7433,20 @@ static void bnx2x_phy_def_cfg(struct link_params *params, /* Populate the default phy configuration for MF mode */ if (phy_index == EXT_PHY2) { link_config = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. + offsetof(struct shmem_region, dev_info. port_feature_config[params->port].link_config2)); phy->speed_cap_mask = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. + offsetof(struct shmem_region, + dev_info. port_hw_config[params->port].speed_capability_mask2)); } else { link_config = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. + offsetof(struct shmem_region, dev_info. port_feature_config[params->port].link_config)); phy->speed_cap_mask = REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_hw_config[params->port].speed_capability_mask)); + offsetof(struct shmem_region, + dev_info. 
+ port_hw_config[params->port].speed_capability_mask)); } DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask" " 0x%x\n", phy_index, link_config, phy->speed_cap_mask); @@ -7408,7 +7593,7 @@ static void set_phy_vars(struct link_params *params) else if (phy_index == EXT_PHY2) actual_phy_idx = EXT_PHY1; } - params->phy[actual_phy_idx].req_flow_ctrl = + params->phy[actual_phy_idx].req_flow_ctrl = params->req_flow_ctrl[link_cfg_idx]; params->phy[actual_phy_idx].req_line_speed = @@ -7461,57 +7646,6 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) set_phy_vars(params); DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys); - if (CHIP_REV_IS_FPGA(bp)) { - - vars->link_up = 1; - vars->line_speed = SPEED_10000; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD); - /* enable on E1.5 FPGA */ - if (CHIP_IS_E1H(bp)) { - vars->flow_ctrl |= - (BNX2X_FLOW_CTRL_TX | - BNX2X_FLOW_CTRL_RX); - vars->link_status |= - (LINK_STATUS_TX_FLOW_CONTROL_ENABLED | - LINK_STATUS_RX_FLOW_CONTROL_ENABLED); - } - - bnx2x_emac_enable(params, vars, 0); - if (!(CHIP_IS_E2(bp))) - bnx2x_pbf_update(params, vars->flow_ctrl, - vars->line_speed); - /* disable drain */ - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); - - /* update shared memory */ - bnx2x_update_mng(params, vars->link_status); - - return 0; - - } else - if (CHIP_REV_IS_EMUL(bp)) { - - vars->link_up = 1; - vars->line_speed = SPEED_10000; - vars->duplex = DUPLEX_FULL; - vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD); - - bnx2x_bmac_enable(params, vars, 0); - - bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); - /* Disable drain */ - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE - + params->port*4, 0); - - /* update shared memory */ - bnx2x_update_mng(params, vars->link_status); - - return 0; - - } else if (params->loopback_mode == LOOPBACK_BMAC) { vars->link_up = 1; @@ -7527,8 +7661,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) /* set bmac loopback */ bnx2x_bmac_enable(params, vars, 1); - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + - params->port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } else if (params->loopback_mode == LOOPBACK_EMAC) { @@ -7544,8 +7677,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) /* set bmac loopback */ bnx2x_emac_enable(params, vars, 1); bnx2x_emac_program(params, vars); - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + - params->port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } else if ((params->loopback_mode == LOOPBACK_XGXS) || (params->loopback_mode == LOOPBACK_EXT_PHY)) { @@ -7568,8 +7700,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) bnx2x_emac_program(params, vars); bnx2x_emac_enable(params, vars, 0); } else - bnx2x_bmac_enable(params, vars, 0); - + bnx2x_bmac_enable(params, vars, 0); if (params->loopback_mode == LOOPBACK_XGXS) { /* set 10G XGXS loopback */ params->phy[INT_PHY].config_loopback( @@ -7587,9 +7718,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) params); } } - - REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + - params->port*4, 0); + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); @@ -7608,7 +7737,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars) return 0; } u8 bnx2x_link_reset(struct 
link_params *params, struct link_vars *vars, - u8 reset_ext_phy) + u8 reset_ext_phy) { struct bnx2x *bp = params->bp; u8 phy_index, port = params->port, clear_latch_ind = 0; @@ -7617,10 +7746,10 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, vars->link_status = 0; bnx2x_update_mng(params, vars->link_status); bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); /* activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); @@ -7719,21 +7848,22 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, /* disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port_of_path*4, - (NIG_MASK_XGXS0_LINK_STATUS | - NIG_MASK_XGXS0_LINK10G | - NIG_MASK_SERDES0_LINK_STATUS | - NIG_MASK_MI_INT)); + (NIG_MASK_XGXS0_LINK_STATUS | + NIG_MASK_XGXS0_LINK10G | + NIG_MASK_SERDES0_LINK_STATUS | + NIG_MASK_MI_INT)); /* Need to take the phy out of low power mode in order to write to access its registers */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); + MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); /* Reset the phy */ bnx2x_cl45_write(bp, &phy[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, - 1<<15); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_CTRL, + 1<<15); } /* Add delay of 150ms after reset */ @@ -7762,18 +7892,20 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, /* Only set bit 10 = 1 (Tx power down) */ bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); /* Phase1 of TX_POWER_DOWN reset */ bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, - (val | 1<<10)); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, + (val | 1<<10)); } - /* Toggle Transmitter: Power down and then up with 600ms - delay between */ + /* + * Toggle Transmitter: Power down and then up with 600ms delay + * between + */ msleep(600); /* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */ @@ -7781,25 +7913,25 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, /* Phase2 of POWER_DOWN_RESET */ /* Release bit 10 (Release Tx power down) */ bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, &val); bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); msleep(15); /* Read modify write the SPI-ROM version select register */ bnx2x_cl45_read(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_EDC_FFE_MAIN, &val); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, &val); bnx2x_cl45_write(bp, phy_blk[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); + MDIO_PMA_DEVAD, + MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12))); /* set GPIO2 back to LOW */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, - MISC_REGISTERS_GPIO_OUTPUT_LOW, port); + MISC_REGISTERS_GPIO_OUTPUT_LOW, port); } return 0; } @@ -7846,32 +7978,90 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, /* Set fault module detected LED on */ bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0, - MISC_REGISTERS_GPIO_HIGH, - port); + MISC_REGISTERS_GPIO_HIGH, + port); } return 0; } +static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x 
*bp, u32 shmem_base, + u8 *io_gpio, u8 *io_port) +{ + + u32 phy_gpio_reset = REG_RD(bp, shmem_base + + offsetof(struct shmem_region, + dev_info.port_hw_config[PORT_0].default_cfg)); + switch (phy_gpio_reset) { + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0: + *io_gpio = 0; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0: + *io_gpio = 1; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0: + *io_gpio = 2; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0: + *io_gpio = 3; + *io_port = 0; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1: + *io_gpio = 0; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1: + *io_gpio = 1; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1: + *io_gpio = 2; + *io_port = 1; + break; + case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1: + *io_gpio = 3; + *io_port = 1; + break; + default: + /* Don't override the io_gpio and io_port */ + break; + } +} static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], u32 shmem2_base_path[], u8 phy_index, u32 chip_id) { - s8 port; + s8 port, reset_gpio; u32 swap_val, swap_override; struct bnx2x_phy phy[PORT_MAX]; struct bnx2x_phy *phy_blk[PORT_MAX]; s8 port_of_path; - swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); - swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + swap_val = REG_RD(bp, NIG_REG_PORT_SWAP); + swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE); + reset_gpio = MISC_REGISTERS_GPIO_1; port = 1; - bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override)); + /* + * Retrieve the reset gpio/port which control the reset. + * Default is GPIO1, PORT1 + */ + bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], + (u8 *)&reset_gpio, (u8 *)&port); /* Calculate the port based on port swap */ port ^= (swap_val && swap_override); + /* Initiate PHY reset*/ + bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, + port); + msleep(1); + bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, + port); + msleep(5); /* PART1 - Reset both phys */ @@ -7907,9 +8097,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, /* Reset the phy */ bnx2x_cl45_write(bp, &phy[port], - MDIO_PMA_DEVAD, - MDIO_PMA_REG_CTRL, - 1<<15); + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); } /* Add delay of 150ms after reset */ @@ -7923,7 +8111,7 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp, } /* PART2 - Download firmware to both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { - if (CHIP_IS_E2(bp)) + if (CHIP_IS_E2(bp)) port_of_path = 0; else port_of_path = port; @@ -7958,8 +8146,10 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: - /* GPIO1 affects both ports, so there's need to pull - it for single port alone */ + /* + * GPIO1 affects both ports, so there's need to pull + * it for single port alone + */ rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, shmem2_base_path, phy_index, chip_id); @@ -7969,11 +8159,15 @@ static u8 bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; default: DP(NETIF_MSG_LINK, - "bnx2x_common_init_phy: ext_phy 0x%x not required\n", - ext_phy_type); + "ext_phy 0x%x common init not required\n", + ext_phy_type); break; } + if (rc != 0) + netdev_err(bp->dev, "Warning: PHY was not initialized," + " Port %d\n", + 0); return rc; } @@ -7986,9 +8180,6 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[], u32 ext_phy_type, ext_phy_config; DP(NETIF_MSG_LINK, "Begin 
common phy init\n"); - if (CHIP_REV_IS_EMUL(bp)) - return 0; - /* Check if common init was already done */ phy_ver = REG_RD(bp, shmem_base_path[0] + offsetof(struct shmem_region, diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h index bedab1a942c4..92f36b6950dc 100644 --- a/drivers/net/bnx2x/bnx2x_link.h +++ b/drivers/net/bnx2x/bnx2x_link.h @@ -1,4 +1,4 @@ -/* Copyright 2008-2010 Broadcom Corporation +/* Copyright 2008-2011 Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you @@ -33,7 +33,7 @@ #define BNX2X_FLOW_CTRL_BOTH PORT_FEATURE_FLOW_CONTROL_BOTH #define BNX2X_FLOW_CTRL_NONE PORT_FEATURE_FLOW_CONTROL_NONE -#define SPEED_AUTO_NEG 0 +#define SPEED_AUTO_NEG 0 #define SPEED_12000 12000 #define SPEED_12500 12500 #define SPEED_13000 13000 @@ -44,8 +44,8 @@ #define SFP_EEPROM_VENDOR_NAME_SIZE 16 #define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 #define SFP_EEPROM_VENDOR_OUI_SIZE 3 -#define SFP_EEPROM_PART_NO_ADDR 0x28 -#define SFP_EEPROM_PART_NO_SIZE 16 +#define SFP_EEPROM_PART_NO_ADDR 0x28 +#define SFP_EEPROM_PART_NO_SIZE 16 #define PWR_FLT_ERR_MSG_LEN 250 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \ @@ -62,7 +62,7 @@ #define SINGLE_MEDIA(params) (params->num_phys == 2) /* Dual Media board contains two external phy with different media */ #define DUAL_MEDIA(params) (params->num_phys == 3) -#define FW_PARAM_MDIO_CTRL_OFFSET 16 +#define FW_PARAM_MDIO_CTRL_OFFSET 16 #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) @@ -201,12 +201,14 @@ struct link_params { /* Default / User Configuration */ u8 loopback_mode; -#define LOOPBACK_NONE 0 -#define LOOPBACK_EMAC 1 -#define LOOPBACK_BMAC 2 +#define LOOPBACK_NONE 0 +#define LOOPBACK_EMAC 1 +#define LOOPBACK_BMAC 2 #define LOOPBACK_XGXS 3 #define LOOPBACK_EXT_PHY 4 -#define LOOPBACK_EXT 5 +#define LOOPBACK_EXT 5 +#define LOOPBACK_UMAC 6 +#define LOOPBACK_XMAC 7 /* Device parameters */ u8 mac_addr[6]; @@ -230,10 +232,11 @@ struct link_params { /* Phy register parameter */ u32 chip_id; + /* features */ u32 feature_config_flags; -#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) -#define FEATURE_CONFIG_PFC_ENABLED (1<<1) -#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) +#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) +#define FEATURE_CONFIG_PFC_ENABLED (1<<1) +#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) /* Will be populated during common init */ struct bnx2x_phy phy[MAX_PHYS]; @@ -334,6 +337,11 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port); /* Reset the external of SFX7101 */ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy); +/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ +u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, + struct link_params *params, u16 addr, + u8 byte_cnt, u8 *o_buf); + void bnx2x_hw_reset_phy(struct link_params *params); /* Checks if HW lock is required for this phy/board type */ @@ -379,7 +387,7 @@ void bnx2x_ets_disabled(struct link_params *params); /* Used to configure the ETS to BW limited */ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw, - const u32 cos1_bw); + const u32 cos1_bw); /* Used to configure the ETS to strict */ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos); diff --git 
a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index aa032339e321..9d48659e3b28 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -586,7 +586,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); /* lock the dmae channel */ - mutex_lock(&bp->dmae_mutex); + spin_lock_bh(&bp->dmae_lock); /* reset completion */ *wb_comp = 0; @@ -617,7 +617,7 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]); unlock: - mutex_unlock(&bp->dmae_mutex); + spin_unlock_bh(&bp->dmae_lock); return rc; } @@ -1397,7 +1397,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, } smp_mb__before_atomic_inc(); - atomic_inc(&bp->spq_left); + atomic_inc(&bp->cq_spq_left); /* push the change in fp->state and towards the memory */ smp_wmb(); @@ -2484,8 +2484,14 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp, rxq_init->sge_map = fp->rx_sge_mapping; rxq_init->rcq_map = fp->rx_comp_mapping; rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE; - rxq_init->mtu = bp->dev->mtu; - rxq_init->buf_sz = bp->rx_buf_size; + + /* Always use mini-jumbo MTU for FCoE L2 ring */ + if (IS_FCOE_FP(fp)) + rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU; + else + rxq_init->mtu = bp->dev->mtu; + + rxq_init->buf_sz = fp->rx_buf_size; rxq_init->cl_qzone_id = fp->cl_qzone_id; rxq_init->cl_id = fp->cl_id; rxq_init->spcl_id = fp->cl_id; @@ -2737,11 +2743,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, spin_lock_bh(&bp->spq_lock); - if (!atomic_read(&bp->spq_left)) { - BNX2X_ERR("BUG! SPQ ring full!\n"); - spin_unlock_bh(&bp->spq_lock); - bnx2x_panic(); - return -EBUSY; + if (common) { + if (!atomic_read(&bp->eq_spq_left)) { + BNX2X_ERR("BUG! EQ ring full!\n"); + spin_unlock_bh(&bp->spq_lock); + bnx2x_panic(); + return -EBUSY; + } + } else if (!atomic_read(&bp->cq_spq_left)) { + BNX2X_ERR("BUG! SPQ ring full!\n"); + spin_unlock_bh(&bp->spq_lock); + bnx2x_panic(); + return -EBUSY; } spe = bnx2x_sp_get_next(bp); @@ -2772,20 +2785,26 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, spe->data.update_data_addr.lo = cpu_to_le32(data_lo); /* stats ramrod has it's own slot on the spq */ - if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) + if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) { /* It's ok if the actual decrement is issued towards the memory * somewhere between the spin_lock and spin_unlock. Thus no * more explict memory barrier is needed. 
*/ - atomic_dec(&bp->spq_left); + if (common) + atomic_dec(&bp->eq_spq_left); + else + atomic_dec(&bp->cq_spq_left); + } + DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) " - "type(0x%x) left %x\n", + "type(0x%x) left (ETH, COMMON) (%x,%x)\n", bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) + (void *)bp->spq_prod_bd - (void *)bp->spq), command, - HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left)); + HW_CID(bp, cid), data_hi, data_lo, type, + atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left)); bnx2x_sp_prod_update(bp); spin_unlock_bh(&bp->spq_lock); @@ -3697,8 +3716,8 @@ static void bnx2x_eq_int(struct bnx2x *bp) sw_cons = bp->eq_cons; sw_prod = bp->eq_prod; - DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n", - hw_cons, sw_cons, atomic_read(&bp->spq_left)); + DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n", + hw_cons, sw_cons, atomic_read(&bp->eq_spq_left)); for (; sw_cons != hw_cons; sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) { @@ -3763,13 +3782,15 @@ static void bnx2x_eq_int(struct bnx2x *bp) case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN): case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG): DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); - bp->set_mac_pending = 0; + if (elem->message.data.set_mac_event.echo) + bp->set_mac_pending = 0; break; case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); - bp->set_mac_pending = 0; + if (elem->message.data.set_mac_event.echo) + bp->set_mac_pending = 0; break; default: /* unknown event log error and continue */ @@ -3781,7 +3802,7 @@ next_spqe: } /* for */ smp_mb__before_atomic_inc(); - atomic_add(spqe_cnt, &bp->spq_left); + atomic_add(spqe_cnt, &bp->eq_spq_left); bp->eq_cons = sw_cons; bp->eq_prod = sw_prod; @@ -4208,13 +4229,13 @@ void bnx2x_update_coalesce(struct bnx2x *bp) for_each_eth_queue(bp, i) bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, - bp->rx_ticks, bp->tx_ticks); + bp->tx_ticks, bp->rx_ticks); } static void bnx2x_init_sp_ring(struct bnx2x *bp) { spin_lock_init(&bp->spq_lock); - atomic_set(&bp->spq_left, MAX_SPQ_PENDING); + atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING); bp->spq_prod_idx = 0; bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX; @@ -4239,9 +4260,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp) bp->eq_cons = 0; bp->eq_prod = NUM_EQ_DESC; bp->eq_cons_sb = BNX2X_EQ_INDEX; + /* we want a warning message before it gets rought... 
*/ + atomic_set(&bp->eq_spq_left, + min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1); } -static void bnx2x_init_ind_table(struct bnx2x *bp) +void bnx2x_push_indir_table(struct bnx2x *bp) { int func = BP_FUNC(bp); int i; @@ -4249,13 +4273,20 @@ static void bnx2x_init_ind_table(struct bnx2x *bp) if (bp->multi_mode == ETH_RSS_MODE_DISABLED) return; - DP(NETIF_MSG_IFUP, - "Initializing indirection table multi_mode %d\n", bp->multi_mode); for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, - bp->fp->cl_id + (i % (bp->num_queues - - NONE_ETH_CONTEXT_USE))); + bp->fp->cl_id + bp->rx_indir_table[i]); +} + +static void bnx2x_init_ind_table(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) + bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp); + + bnx2x_push_indir_table(bp); } void bnx2x_set_storm_rx_mode(struct bnx2x *bp) @@ -5851,7 +5882,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) BP_ABS_FUNC(bp), load_code); bp->dmae_ready = 0; - mutex_init(&bp->dmae_mutex); + spin_lock_init(&bp->dmae_lock); rc = bnx2x_gunzip_init(bp); if (rc) return rc; @@ -6003,6 +6034,8 @@ void bnx2x_free_mem(struct bnx2x *bp) BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping, BCM_PAGE_SIZE * NUM_EQ_PAGES); + BNX2X_FREE(bp->rx_indir_table); + #undef BNX2X_PCI_FREE #undef BNX2X_KFREE } @@ -6133,6 +6166,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp) /* EQ */ BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, BCM_PAGE_SIZE * NUM_EQ_PAGES); + + BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) * + TSTORM_INDIRECTION_TABLE_SIZE); return 0; alloc_mem_err: @@ -6186,12 +6222,14 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac, int ramrod_flags = WAIT_RAMROD_COMMON; bp->set_mac_pending = 1; - smp_wmb(); config->hdr.length = 1; config->hdr.offset = cam_offset; config->hdr.client_id = 0xff; - config->hdr.reserved1 = 0; + /* Mark the single MAC configuration ramrod as opposed to a + * UC/MC list configuration). + */ + config->hdr.echo = 1; /* primary MAC */ config->config_table[0].msb_mac_addr = @@ -6223,6 +6261,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac, config->config_table[0].middle_mac_addr, config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec); + mb(); + bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, U64_HI(bnx2x_sp_mapping(bp, mac_config)), U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1); @@ -6287,20 +6327,15 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset) if (CHIP_IS_E1H(bp)) return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp); else if (CHIP_MODE_IS_4_PORT(bp)) - return BP_FUNC(bp) * 32 + rel_offset; + return E2_FUNC_MAX * rel_offset + BP_FUNC(bp); else - return BP_VN(bp) * 32 + rel_offset; + return E2_FUNC_MAX * rel_offset + BP_VN(bp); } /** * LLH CAM line allocations: currently only iSCSI and ETH macs are * relevant. In addition, current implementation is tuned for a * single ETH MAC. - * - * When multiple unicast ETH MACs PF configuration in switch - * independent mode is required (NetQ, multiple netdev MACs, - * etc.), consider better utilisation of 16 per function MAC - * entries in the LLH memory. 
*/ enum { LLH_CAM_ISCSI_ETH_LINE = 0, @@ -6375,14 +6410,37 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set) bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); } } -static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset) + +static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp) +{ + return CHIP_REV_IS_SLOW(bp) ? + (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) : + (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp))); +} + +/* set mc list, do not wait as wait implies sleep and + * set_rx_mode can be invoked from non-sleepable context. + * + * Instead we use the same ramrod data buffer each time we need + * to configure a list of addresses, and use the fact that the + * list of MACs is changed in an incremental way and that the + * function is called under the netif_addr_lock. A temporary + * inconsistent CAM configuration (possible in case of a very fast + * sequence of add/del/add on the host side) will shortly be + * restored by the handler of the last ramrod. + */ +static int bnx2x_set_e1_mc_list(struct bnx2x *bp) { int i = 0, old; struct net_device *dev = bp->dev; + u8 offset = bnx2x_e1_cam_mc_offset(bp); struct netdev_hw_addr *ha; struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); + if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) + return -EINVAL; + netdev_for_each_mc_addr(ha, dev) { /* copy mac */ config_cmd->config_table[i].msb_mac_addr = @@ -6423,32 +6481,47 @@ static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset) } } + wmb(); + config_cmd->hdr.length = i; config_cmd->hdr.offset = offset; config_cmd->hdr.client_id = 0xff; - config_cmd->hdr.reserved1 = 0; + /* Mark that this ramrod doesn't use bp->set_mac_pending for + * synchronization. + */ + config_cmd->hdr.echo = 0; - bp->set_mac_pending = 1; - smp_wmb(); + mb(); - bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); } -static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp) + +void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp) { int i; struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config); dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config); int ramrod_flags = WAIT_RAMROD_COMMON; + u8 offset = bnx2x_e1_cam_mc_offset(bp); - bp->set_mac_pending = 1; - smp_wmb(); - - for (i = 0; i < config_cmd->hdr.length; i++) + for (i = 0; i < BNX2X_MAX_MULTICAST; i++) SET_FLAG(config_cmd->config_table[i].flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE, T_ETH_MAC_COMMAND_INVALIDATE); + wmb(); + + config_cmd->hdr.length = BNX2X_MAX_MULTICAST; + config_cmd->hdr.offset = offset; + config_cmd->hdr.client_id = 0xff; + /* We'll wait for a completion this time... 
*/ + config_cmd->hdr.echo = 1; + + bp->set_mac_pending = 1; + + mb(); + bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); @@ -6458,6 +6531,44 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp) } +/* Accept one or more multicasts */ +static int bnx2x_set_e1h_mc_list(struct bnx2x *bp) +{ + struct net_device *dev = bp->dev; + struct netdev_hw_addr *ha; + u32 mc_filter[MC_HASH_SIZE]; + u32 crc, bit, regidx; + int i; + + memset(mc_filter, 0, 4 * MC_HASH_SIZE); + + netdev_for_each_mc_addr(ha, dev) { + DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", + bnx2x_mc_addr(ha)); + + crc = crc32c_le(0, bnx2x_mc_addr(ha), + ETH_ALEN); + bit = (crc >> 24) & 0xff; + regidx = bit >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + for (i = 0; i < MC_HASH_SIZE; i++) + REG_WR(bp, MC_HASH_OFFSET(bp, i), + mc_filter[i]); + + return 0; +} + +void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp) +{ + int i; + + for (i = 0; i < MC_HASH_SIZE; i++) + REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); +} + #ifdef BCM_CNIC /** * Set iSCSI MAC(s) at the next enties in the CAM after the ETH @@ -6476,12 +6587,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set) u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID + BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; u32 cl_bit_vec = (1 << iscsi_l2_cl_id); + u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; /* Send a SET_MAC ramrod */ - bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, + bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec, cam_offset, 0); - bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); + bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE); return 0; } @@ -7123,20 +7235,15 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) /* Give HW time to discard old tx messages */ msleep(1); - if (CHIP_IS_E1(bp)) { - /* invalidate mc list, - * wait and poll (interrupts are off) - */ - bnx2x_invlidate_e1_mc_list(bp); - bnx2x_set_eth_mac(bp, 0); - - } else { - REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); + bnx2x_set_eth_mac(bp, 0); - bnx2x_set_eth_mac(bp, 0); + bnx2x_invalidate_uc_list(bp); - for (i = 0; i < MC_HASH_SIZE; i++) - REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); + if (CHIP_IS_E1(bp)) + bnx2x_invalidate_e1_mc_list(bp); + else { + bnx2x_invalidate_e1h_mc_list(bp); + REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); } #ifdef BCM_CNIC @@ -8405,11 +8512,47 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) bp->common.shmem2_base); } +#ifdef BCM_CNIC +static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp) +{ + u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[BP_PORT(bp)].max_iscsi_conn); + u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp, + drv_lic_key[BP_PORT(bp)].max_fcoe_conn); + + /* Get the number of maximum allowed iSCSI and FCoE connections */ + bp->cnic_eth_dev.max_iscsi_conn = + (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >> + BNX2X_MAX_ISCSI_INIT_CONN_SHIFT; + + bp->cnic_eth_dev.max_fcoe_conn = + (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >> + BNX2X_MAX_FCOE_INIT_CONN_SHIFT; + + BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n", + bp->cnic_eth_dev.max_iscsi_conn, + bp->cnic_eth_dev.max_fcoe_conn); + + /* If mamimum allowed number of connections is zero - + * disable the feature. 
+ */ + if (!bp->cnic_eth_dev.max_iscsi_conn) + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + + if (!bp->cnic_eth_dev.max_fcoe_conn) + bp->flags |= NO_FCOE_FLAG; +} +#endif + static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) { u32 val, val2; int func = BP_ABS_FUNC(bp); int port = BP_PORT(bp); +#ifdef BCM_CNIC + u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac; + u8 *fip_mac = bp->fip_mac; +#endif if (BP_NOMCP(bp)) { BNX2X_ERROR("warning: random MAC workaround active\n"); @@ -8422,7 +8565,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2); #ifdef BCM_CNIC - /* iSCSI NPAR MAC */ + /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or + * FCoE MAC then the appropriate feature should be disabled. + */ if (IS_MF_SI(bp)) { u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) { @@ -8430,8 +8575,39 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) iscsi_mac_addr_upper); val = MF_CFG_RD(bp, func_ext_config[func]. iscsi_mac_addr_lower); - bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); - } + BNX2X_DEV_INFO("Read iSCSI MAC: " + "0x%x:0x%04x\n", val2, val); + bnx2x_set_mac_buf(iscsi_mac, val, val2); + + /* Disable iSCSI OOO if MAC configuration is + * invalid. + */ + if (!is_valid_ether_addr(iscsi_mac)) { + bp->flags |= NO_ISCSI_OOO_FLAG | + NO_ISCSI_FLAG; + memset(iscsi_mac, 0, ETH_ALEN); + } + } else + bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG; + + if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) { + val2 = MF_CFG_RD(bp, func_ext_config[func]. + fcoe_mac_addr_upper); + val = MF_CFG_RD(bp, func_ext_config[func]. + fcoe_mac_addr_lower); + BNX2X_DEV_INFO("Read FCoE MAC to " + "0x%x:0x%04x\n", val2, val); + bnx2x_set_mac_buf(fip_mac, val, val2); + + /* Disable FCoE if MAC configuration is + * invalid. + */ + if (!is_valid_ether_addr(fip_mac)) { + bp->flags |= NO_FCOE_FLAG; + memset(bp->fip_mac, 0, ETH_ALEN); + } + } else + bp->flags |= NO_FCOE_FLAG; } #endif } else { @@ -8445,7 +8621,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) iscsi_mac_upper); val = SHMEM_RD(bp, dev_info.port_hw_config[port]. 
iscsi_mac_lower); - bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); + bnx2x_set_mac_buf(iscsi_mac, val, val2); #endif } @@ -8453,14 +8629,12 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp) memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); #ifdef BCM_CNIC - /* Inform the upper layers about FCoE MAC */ + /* Set the FCoE MAC in modes other then MF_SI */ if (!CHIP_IS_E1x(bp)) { if (IS_MF_SD(bp)) - memcpy(bp->fip_mac, bp->dev->dev_addr, - sizeof(bp->fip_mac)); - else - memcpy(bp->fip_mac, bp->iscsi_mac, - sizeof(bp->fip_mac)); + memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN); + else if (!IS_MF(bp)) + memcpy(fip_mac, iscsi_mac, ETH_ALEN); } #endif } @@ -8623,6 +8797,10 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) /* Get MAC addresses */ bnx2x_get_mac_hwinfo(bp); +#ifdef BCM_CNIC + bnx2x_get_cnic_info(bp); +#endif + return rc; } @@ -8837,12 +9015,197 @@ static int bnx2x_close(struct net_device *dev) return 0; } +#define E1_MAX_UC_LIST 29 +#define E1H_MAX_UC_LIST 30 +#define E2_MAX_UC_LIST 14 +static inline u8 bnx2x_max_uc_list(struct bnx2x *bp) +{ + if (CHIP_IS_E1(bp)) + return E1_MAX_UC_LIST; + else if (CHIP_IS_E1H(bp)) + return E1H_MAX_UC_LIST; + else + return E2_MAX_UC_LIST; +} + + +static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp) +{ + if (CHIP_IS_E1(bp)) + /* CAM Entries for Port0: + * 0 - prim ETH MAC + * 1 - BCAST MAC + * 2 - iSCSI L2 ring ETH MAC + * 3-31 - UC MACs + * + * Port1 entries are allocated the same way starting from + * entry 32. + */ + return 3 + 32 * BP_PORT(bp); + else if (CHIP_IS_E1H(bp)) { + /* CAM Entries: + * 0-7 - prim ETH MAC for each function + * 8-15 - iSCSI L2 ring ETH MAC for each function + * 16 till 255 UC MAC lists for each function + * + * Remark: There is no FCoE support for E1H, thus FCoE related + * MACs are not considered. + */ + return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) + + bnx2x_max_uc_list(bp) * BP_FUNC(bp); + } else { + /* CAM Entries (there is a separate CAM per engine): + * 0-4 - prim ETH MAC for each function + * 4-7 - iSCSI L2 ring ETH MAC for each function + * 8-11 - FIP ucast L2 MAC for each function + * 12-15 - ALL_ENODE_MACS mcast MAC for each function + * 16 till 71 UC MAC lists for each function + */ + u8 func_idx = + (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp)); + + return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) + + bnx2x_max_uc_list(bp) * func_idx; + } +} + +/* set uc list, do not wait as wait implies sleep and + * set_rx_mode can be invoked from non-sleepable context. + * + * Instead we use the same ramrod data buffer each time we need + * to configure a list of addresses, and use the fact that the + * list of MACs is changed in an incremental way and that the + * function is called under the netif_addr_lock. A temporary + * inconsistent CAM configuration (possible in case of very fast + * sequence of add/del/add on the host side) will shortly be + * restored by the handler of the last ramrod. 
+ */ +static int bnx2x_set_uc_list(struct bnx2x *bp) +{ + int i = 0, old; + struct net_device *dev = bp->dev; + u8 offset = bnx2x_uc_list_cam_offset(bp); + struct netdev_hw_addr *ha; + struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config); + dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config); + + if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp)) + return -EINVAL; + + netdev_for_each_uc_addr(ha, dev) { + /* copy mac */ + config_cmd->config_table[i].msb_mac_addr = + swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]); + config_cmd->config_table[i].middle_mac_addr = + swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]); + config_cmd->config_table[i].lsb_mac_addr = + swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]); + + config_cmd->config_table[i].vlan_id = 0; + config_cmd->config_table[i].pf_id = BP_FUNC(bp); + config_cmd->config_table[i].clients_bit_vector = + cpu_to_le32(1 << BP_L_ID(bp)); + + SET_FLAG(config_cmd->config_table[i].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_SET); + + DP(NETIF_MSG_IFUP, + "setting UCAST[%d] (%04x:%04x:%04x)\n", i, + config_cmd->config_table[i].msb_mac_addr, + config_cmd->config_table[i].middle_mac_addr, + config_cmd->config_table[i].lsb_mac_addr); + + i++; + + /* Set uc MAC in NIG */ + bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha), + LLH_CAM_ETH_LINE + i); + } + old = config_cmd->hdr.length; + if (old > i) { + for (; i < old; i++) { + if (CAM_IS_INVALID(config_cmd-> + config_table[i])) { + /* already invalidated */ + break; + } + /* invalidate */ + SET_FLAG(config_cmd->config_table[i].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); + } + } + + wmb(); + + config_cmd->hdr.length = i; + config_cmd->hdr.offset = offset; + config_cmd->hdr.client_id = 0xff; + /* Mark that this ramrod doesn't use bp->set_mac_pending for + * synchronization. + */ + config_cmd->hdr.echo = 0; + + mb(); + + return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, + U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); + +} + +void bnx2x_invalidate_uc_list(struct bnx2x *bp) +{ + int i; + struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config); + dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config); + int ramrod_flags = WAIT_RAMROD_COMMON; + u8 offset = bnx2x_uc_list_cam_offset(bp); + u8 max_list_size = bnx2x_max_uc_list(bp); + + for (i = 0; i < max_list_size; i++) { + SET_FLAG(config_cmd->config_table[i].flags, + MAC_CONFIGURATION_ENTRY_ACTION_TYPE, + T_ETH_MAC_COMMAND_INVALIDATE); + bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i); + } + + wmb(); + + config_cmd->hdr.length = max_list_size; + config_cmd->hdr.offset = offset; + config_cmd->hdr.client_id = 0xff; + /* We'll wait for a completion this time... 
*/ + config_cmd->hdr.echo = 1; + + bp->set_mac_pending = 1; + + mb(); + + bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, + U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1); + + /* Wait for a completion */ + bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, + ramrod_flags); + +} + +static inline int bnx2x_set_mc_list(struct bnx2x *bp) +{ + /* some multicasts */ + if (CHIP_IS_E1(bp)) { + return bnx2x_set_e1_mc_list(bp); + } else { /* E1H and newer */ + return bnx2x_set_e1h_mc_list(bp); + } +} + /* called with netif_tx_lock from dev_mcast.c */ void bnx2x_set_rx_mode(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); u32 rx_mode = BNX2X_RX_MODE_NORMAL; - int port = BP_PORT(bp); if (bp->state != BNX2X_STATE_OPEN) { DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state); @@ -8853,47 +9216,16 @@ void bnx2x_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_PROMISC) rx_mode = BNX2X_RX_MODE_PROMISC; - else if ((dev->flags & IFF_ALLMULTI) || - ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) && - CHIP_IS_E1(bp))) + else if (dev->flags & IFF_ALLMULTI) rx_mode = BNX2X_RX_MODE_ALLMULTI; - else { /* some multicasts */ - if (CHIP_IS_E1(bp)) { - /* - * set mc list, do not wait as wait implies sleep - * and set_rx_mode can be invoked from non-sleepable - * context - */ - u8 offset = (CHIP_REV_IS_SLOW(bp) ? - BNX2X_MAX_EMUL_MULTI*(1 + port) : - BNX2X_MAX_MULTICAST*(1 + port)); - - bnx2x_set_e1_mc_list(bp, offset); - } else { /* E1H */ - /* Accept one or more multicasts */ - struct netdev_hw_addr *ha; - u32 mc_filter[MC_HASH_SIZE]; - u32 crc, bit, regidx; - int i; - - memset(mc_filter, 0, 4 * MC_HASH_SIZE); - - netdev_for_each_mc_addr(ha, dev) { - DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", - bnx2x_mc_addr(ha)); - - crc = crc32c_le(0, bnx2x_mc_addr(ha), - ETH_ALEN); - bit = (crc >> 24) & 0xff; - regidx = bit >> 5; - bit &= 0x1f; - mc_filter[regidx] |= (1 << bit); - } + else { + /* some multicasts */ + if (bnx2x_set_mc_list(bp)) + rx_mode = BNX2X_RX_MODE_ALLMULTI; - for (i = 0; i < MC_HASH_SIZE; i++) - REG_WR(bp, MC_HASH_OFFSET(bp, i), - mc_filter[i]); - } + /* some unicasts */ + if (bnx2x_set_uc_list(bp)) + rx_mode = BNX2X_RX_MODE_PROMISC; } bp->rx_mode = rx_mode; @@ -8974,7 +9306,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_stop = bnx2x_close, .ndo_start_xmit = bnx2x_start_xmit, .ndo_select_queue = bnx2x_select_queue, - .ndo_set_multicast_list = bnx2x_set_rx_mode, + .ndo_set_rx_mode = bnx2x_set_rx_mode, .ndo_set_mac_address = bnx2x_change_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = bnx2x_ioctl, @@ -9120,7 +9452,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); dev->vlan_features |= NETIF_F_TSO6; -#ifdef BCM_DCB +#ifdef BCM_DCBNL dev->dcbnl_ops = &bnx2x_dcbnl_ops; #endif @@ -9527,6 +9859,11 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) } #endif +#ifdef BCM_DCBNL + /* Delete app tlvs from dcbnl */ + bnx2x_dcbnl_update_applist(bp, true); +#endif + unregister_netdev(dev); /* Delete all NAPI objects */ @@ -9800,15 +10137,21 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) HW_CID(bp, BNX2X_ISCSI_ETH_CID)); } - /* There may be not more than 8 L2 and COMMON SPEs and not more - * than 8 L5 SPEs in the air. + /* There may be not more than 8 L2 and not more than 8 L5 SPEs + * We also check that the number of outstanding + * COMMON ramrods is not more than the EQ and SPQ can + * accommodate. 
*/ - if ((type == NONE_CONNECTION_TYPE) || - (type == ETH_CONNECTION_TYPE)) { - if (!atomic_read(&bp->spq_left)) + if (type == ETH_CONNECTION_TYPE) { + if (!atomic_read(&bp->cq_spq_left)) break; else - atomic_dec(&bp->spq_left); + atomic_dec(&bp->cq_spq_left); + } else if (type == NONE_CONNECTION_TYPE) { + if (!atomic_read(&bp->eq_spq_left)) + break; + else + atomic_dec(&bp->eq_spq_left); } else if ((type == ISCSI_CONNECTION_TYPE) || (type == FCOE_CONNECTION_TYPE)) { if (bp->cnic_spq_pending >= @@ -9886,7 +10229,8 @@ static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl) int rc = 0; mutex_lock(&bp->cnic_mutex); - c_ops = bp->cnic_ops; + c_ops = rcu_dereference_protected(bp->cnic_ops, + lockdep_is_held(&bp->cnic_mutex)); if (c_ops) rc = c_ops->cnic_ctl(bp->cnic_data, ctl); mutex_unlock(&bp->cnic_mutex); @@ -10000,7 +10344,7 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl) int count = ctl->data.credit.credit_count; smp_mb__before_atomic_inc(); - atomic_add(count, &bp->spq_left); + atomic_add(count, &bp->cq_spq_left); smp_mb__after_atomic_inc(); break; } @@ -10096,6 +10440,13 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) struct bnx2x *bp = netdev_priv(dev); struct cnic_eth_dev *cp = &bp->cnic_eth_dev; + /* If both iSCSI and FCoE are disabled - return NULL in + * order to indicate CNIC that it should not try to work + * with this device. + */ + if (NO_ISCSI(bp) && NO_FCOE(bp)) + return NULL; + cp->drv_owner = THIS_MODULE; cp->chip_id = CHIP_ID(bp); cp->pdev = bp->pdev; @@ -10116,6 +10467,15 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE; cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; + if (NO_ISCSI_OOO(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; + + if (NO_ISCSI(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI; + + if (NO_FCOE(bp)) + cp->drv_state |= CNIC_DRV_STATE_NO_FCOE; + DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " "starting cid %d\n", cp->ctx_blk_size, diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index e01330bb36c7..1c89f19a4425 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h @@ -6083,6 +6083,7 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_PMA_REG_8727_PCS_OPT_CTRL 0xc808 #define MDIO_PMA_REG_8727_GPIO_CTRL 0xc80e #define MDIO_PMA_REG_8727_PCS_GP 0xc842 +#define MDIO_PMA_REG_8727_OPT_CFG_REG 0xc8e4 #define MDIO_AN_REG_8727_MISC_CTRL 0x8309 diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile index 0e2737eac8b7..3c5c014e82b2 100644 --- a/drivers/net/bonding/Makefile +++ b/drivers/net/bonding/Makefile @@ -6,6 +6,9 @@ obj-$(CONFIG_BONDING) += bonding.o bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o +proc-$(CONFIG_PROC_FS) += bond_procfs.o +bonding-objs += $(proc-y) + ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o bonding-objs += $(ipv6-y) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a5d5d0b5b155..494bf960442d 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -246,7 +246,7 @@ static inline void __enable_port(struct port *port) */ static inline int __port_is_enabled(struct port *port) { - return port->slave->state == BOND_STATE_ACTIVE; + return bond_is_active_slave(port->slave); } /** diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 5c6fba802f2b..9bc5de3e04a8 100644 --- a/drivers/net/bonding/bond_alb.c +++ 
b/drivers/net/bonding/bond_alb.c @@ -604,7 +604,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon _lock_rx_hashtbl(bond); - hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_src)); + hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst)); client_info = &(bond_info->rx_hashtbl[hash_index]); if (client_info->assigned) { diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 163e0b06eaa5..1a6e9eb7af43 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -59,15 +59,12 @@ #include <linux/uaccess.h> #include <linux/errno.h> #include <linux/netdevice.h> -#include <linux/netpoll.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/rtnetlink.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> #include <linux/smp.h> #include <linux/if_ether.h> #include <net/arp.h> @@ -174,9 +171,6 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link atomic_t netpoll_block_tx = ATOMIC_INIT(0); #endif -static const char * const version = - DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; - int bond_net_id __read_mostly; static __be32 arp_target[BOND_MAX_ARP_TARGETS]; @@ -246,7 +240,7 @@ static void bond_uninit(struct net_device *bond_dev); /*---------------------------- General routines -----------------------------*/ -static const char *bond_mode_name(int mode) +const char *bond_mode_name(int mode) { static const char *names[] = { [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)", @@ -424,15 +418,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, { skb->dev = slave_dev; skb->priority = 1; -#ifdef CONFIG_NET_POLL_CONTROLLER - if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { - struct netpoll *np = bond->dev->npinfo->netpoll; - slave_dev->npinfo = bond->dev->npinfo; - slave_dev->priv_flags |= IFF_IN_NETPOLL; - netpoll_send_skb_on_dev(np, skb, slave_dev); - slave_dev->priv_flags &= ~IFF_IN_NETPOLL; - } else -#endif + if (unlikely(netpoll_tx_running(slave_dev))) + bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); + else dev_queue_xmit(skb); return 0; @@ -1288,63 +1276,103 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * You must hold read lock on bond->lock before calling this. 
- */ -static bool slaves_support_netpoll(struct net_device *bond_dev) +static inline int slave_enable_netpoll(struct slave *slave) { - struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave; - int i = 0; - bool ret = true; + struct netpoll *np; + int err = 0; - bond_for_each_slave(bond, slave, i) { - if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) || - !slave->dev->netdev_ops->ndo_poll_controller) - ret = false; + np = kzalloc(sizeof(*np), GFP_KERNEL); + err = -ENOMEM; + if (!np) + goto out; + + np->dev = slave->dev; + err = __netpoll_setup(np); + if (err) { + kfree(np); + goto out; } - return i != 0 && ret; + slave->np = np; +out: + return err; +} +static inline void slave_disable_netpoll(struct slave *slave) +{ + struct netpoll *np = slave->np; + + if (!np) + return; + + slave->np = NULL; + synchronize_rcu_bh(); + __netpoll_cleanup(np); + kfree(np); +} +static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) +{ + if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL) + return false; + if (!slave_dev->netdev_ops->ndo_poll_controller) + return false; + return true; } static void bond_poll_controller(struct net_device *bond_dev) { - struct bonding *bond = netdev_priv(bond_dev); +} + +static void __bond_netpoll_cleanup(struct bonding *bond) +{ struct slave *slave; int i; - bond_for_each_slave(bond, slave, i) { - if (slave->dev && IS_UP(slave->dev)) - netpoll_poll_dev(slave->dev); - } + bond_for_each_slave(bond, slave, i) + if (IS_UP(slave->dev)) + slave_disable_netpoll(slave); } - static void bond_netpoll_cleanup(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + + read_lock(&bond->lock); + __bond_netpoll_cleanup(bond); + read_unlock(&bond->lock); +} + +static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +{ + struct bonding *bond = netdev_priv(dev); struct slave *slave; - const struct net_device_ops *ops; - int i; + int i, err = 0; read_lock(&bond->lock); - bond_dev->npinfo = NULL; bond_for_each_slave(bond, slave, i) { - if (slave->dev) { - ops = slave->dev->netdev_ops; - if (ops->ndo_netpoll_cleanup) - ops->ndo_netpoll_cleanup(slave->dev); - else - slave->dev->npinfo = NULL; + err = slave_enable_netpoll(slave); + if (err) { + __bond_netpoll_cleanup(bond); + break; } } read_unlock(&bond->lock); + return err; } -#else +static struct netpoll_info *bond_netpoll_info(struct bonding *bond) +{ + return bond->dev->npinfo; +} +#else +static inline int slave_enable_netpoll(struct slave *slave) +{ + return 0; +} +static inline void slave_disable_netpoll(struct slave *slave) +{ +} static void bond_netpoll_cleanup(struct net_device *bond_dev) { } - #endif /*---------------------------------- IOCTL ----------------------------------*/ @@ -1372,8 +1400,8 @@ static int bond_compute_features(struct bonding *bond) { struct slave *slave; struct net_device *bond_dev = bond->dev; - unsigned long features = bond_dev->features; - unsigned long vlan_features = 0; + u32 features = bond_dev->features; + u32 vlan_features = 0; unsigned short max_hard_header_len = max((u16)ETH_HLEN, bond_dev->hard_header_len); int i; @@ -1400,8 +1428,8 @@ static int bond_compute_features(struct bonding *bond) done: features |= (bond_dev->features & BOND_VLAN_FEATURES); - bond_dev->features = netdev_fix_features(features, NULL); - bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL); + bond_dev->features = netdev_fix_features(bond_dev, features); + bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features); 
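The teardown order used by slave_disable_netpoll() in the hunk above is deliberate. The sketch below restates that order with the reasoning spelled out; it is an illustration only — the example_* name is hypothetical, while the calls, the field slave->np and the ordering mirror the patch hunk above and assume the struct slave definition from bonding.h.

static inline void example_slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	/* Unpublish the pointer first, so bond_netpoll_send_skb() callers
	 * that look at slave->np from now on see NULL and skip netpoll.
	 */
	slave->np = NULL;

	/* The transmit path runs in BH context; wait a grace period so any
	 * sender that already loaded the old pointer is done using it.
	 */
	synchronize_rcu_bh();

	/* Only now is it safe to tear down and free the netpoll instance. */
	__netpoll_cleanup(np);
	kfree(np);
}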
bond_dev->hard_header_len = max_hard_header_len; return 0; @@ -1423,6 +1451,77 @@ static void bond_setup_by_slave(struct net_device *bond_dev, bond->setup_by_slave = 1; } +/* On bonding slaves other than the currently active slave, suppress + * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and + * ARP on active-backup slaves with arp_validate enabled. + */ +static bool bond_should_deliver_exact_match(struct sk_buff *skb, + struct slave *slave, + struct bonding *bond) +{ + if (bond_is_slave_inactive(slave)) { + if (slave_do_arp_validate(bond, slave) && + skb->protocol == __cpu_to_be16(ETH_P_ARP)) + return false; + + if (bond->params.mode == BOND_MODE_ALB && + skb->pkt_type != PACKET_BROADCAST && + skb->pkt_type != PACKET_MULTICAST) + return false; + + if (bond->params.mode == BOND_MODE_8023AD && + skb->protocol == __cpu_to_be16(ETH_P_SLOW)) + return false; + + return true; + } + return false; +} + +static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct slave *slave; + struct net_device *bond_dev; + struct bonding *bond; + + slave = bond_slave_get_rcu(skb->dev); + bond_dev = ACCESS_ONCE(slave->dev->master); + if (unlikely(!bond_dev)) + return RX_HANDLER_PASS; + + skb = skb_share_check(skb, GFP_ATOMIC); + if (unlikely(!skb)) + return RX_HANDLER_CONSUMED; + + *pskb = skb; + + bond = netdev_priv(bond_dev); + + if (bond->params.arp_interval) + slave->dev->last_rx = jiffies; + + if (bond_should_deliver_exact_match(skb, slave, bond)) { + return RX_HANDLER_EXACT; + } + + skb->dev = bond_dev; + + if (bond->params.mode == BOND_MODE_ALB && + bond_dev->priv_flags & IFF_BRIDGE_PORT && + skb->pkt_type == PACKET_HOST) { + + if (unlikely(skb_cow_head(skb, + skb->data - skb_mac_header(skb)))) { + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + memcpy(eth_hdr(skb)->h_dest, bond_dev->dev_addr, ETH_ALEN); + } + + return RX_HANDLER_ANOTHER; +} + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -1594,16 +1693,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } } - res = netdev_set_master(slave_dev, bond_dev); + res = netdev_set_bond_master(slave_dev, bond_dev); if (res) { - pr_debug("Error %d calling netdev_set_master\n", res); + pr_debug("Error %d calling netdev_set_bond_master\n", res); goto err_restore_mac; } + res = netdev_rx_handler_register(slave_dev, bond_handle_frame, + new_slave); + if (res) { + pr_debug("Error %d calling netdev_rx_handler_register\n", res); + goto err_unset_master; + } + /* open the slave since the application closed it */ res = dev_open(slave_dev); if (res) { pr_debug("Opening slave %s failed\n", slave_dev->name); - goto err_unset_master; + goto err_unreg_rxhandler; } new_slave->dev = slave_dev; @@ -1757,7 +1863,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) break; case BOND_MODE_TLB: case BOND_MODE_ALB: - new_slave->state = BOND_STATE_ACTIVE; + bond_set_active_slave(new_slave); bond_set_slave_inactive_flags(new_slave); bond_select_active_slave(bond); break; @@ -1765,7 +1871,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) pr_debug("This slave is always active in trunk mode\n"); /* always active in trunk mode */ - new_slave->state = BOND_STATE_ACTIVE; + bond_set_active_slave(new_slave); /* In trunking mode there is little meaning to curr_active_slave * anyway (it holds no special properties of the bond device), @@ -1782,17 
+1888,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_set_carrier(bond); #ifdef CONFIG_NET_POLL_CONTROLLER - if (slaves_support_netpoll(bond_dev)) { - bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; - if (bond_dev->npinfo) - slave_dev->npinfo = bond_dev->npinfo; - } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) { - bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; - pr_info("New slave device %s does not support netpoll\n", - slave_dev->name); - pr_info("Disabling netpoll support for %s\n", bond_dev->name); + slave_dev->npinfo = bond_netpoll_info(bond); + if (slave_dev->npinfo) { + if (slave_enable_netpoll(new_slave)) { + read_unlock(&bond->lock); + pr_info("Error, %s: master_dev is using netpoll, " + "but new slave device does not support netpoll.\n", + bond_dev->name); + res = -EBUSY; + goto err_close; + } } #endif + read_unlock(&bond->lock); res = bond_create_slave_symlinks(bond_dev, slave_dev); @@ -1801,7 +1909,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) pr_info("%s: enslaving %s as a%s interface with a%s link.\n", bond_dev->name, slave_dev->name, - new_slave->state == BOND_STATE_ACTIVE ? "n active" : " backup", + bond_is_active_slave(new_slave) ? "n active" : " backup", new_slave->link != BOND_LINK_DOWN ? "n up" : " down"); /* enslave is successful */ @@ -1811,8 +1919,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) err_close: dev_close(slave_dev); +err_unreg_rxhandler: + netdev_rx_handler_unregister(slave_dev); + synchronize_net(); + err_unset_master: - netdev_set_master(slave_dev, NULL); + netdev_set_bond_master(slave_dev, NULL); err_restore_mac: if (!bond->params.fail_over_mac) { @@ -1895,7 +2007,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) pr_info("%s: releasing %s interface %s\n", bond_dev->name, - (slave->state == BOND_STATE_ACTIVE) ? "active" : "backup", + bond_is_active_slave(slave) ? 
"active" : "backup", slave_dev->name); oldcurrent = bond->curr_active_slave; @@ -1992,19 +2104,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) netif_addr_unlock_bh(bond_dev); } - netdev_set_master(slave_dev, NULL); + netdev_rx_handler_unregister(slave_dev); + synchronize_net(); + netdev_set_bond_master(slave_dev, NULL); -#ifdef CONFIG_NET_POLL_CONTROLLER - read_lock_bh(&bond->lock); - - if (slaves_support_netpoll(bond_dev)) - bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL; - read_unlock_bh(&bond->lock); - if (slave_dev->netdev_ops->ndo_netpoll_cleanup) - slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev); - else - slave_dev->npinfo = NULL; -#endif + slave_disable_netpoll(slave); /* close slave before restoring its mac address */ dev_close(slave_dev); @@ -2018,9 +2122,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) dev_set_mtu(slave_dev, slave->original_mtu); - slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | - IFF_SLAVE_INACTIVE | IFF_BONDING | - IFF_SLAVE_NEEDARP); + slave_dev->priv_flags &= ~IFF_BONDING; kfree(slave); @@ -2039,6 +2141,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, ret = bond_release(bond_dev, slave_dev); if ((ret == 0) && (bond->slave_cnt == 0)) { + bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; pr_info("%s: destroying bond %s.\n", bond_dev->name, bond_dev->name); unregister_netdevice(bond_dev); @@ -2114,7 +2217,11 @@ static int bond_release_all(struct net_device *bond_dev) netif_addr_unlock_bh(bond_dev); } - netdev_set_master(slave_dev, NULL); + netdev_rx_handler_unregister(slave_dev); + synchronize_net(); + netdev_set_bond_master(slave_dev, NULL); + + slave_disable_netpoll(slave); /* close slave before restoring its mac address */ dev_close(slave_dev); @@ -2126,9 +2233,6 @@ static int bond_release_all(struct net_device *bond_dev) dev_set_mac_address(slave_dev, &addr); } - slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | - IFF_SLAVE_INACTIVE); - kfree(slave); /* re-acquire the lock before getting the next slave */ @@ -2242,7 +2346,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in res = 0; strcpy(info->slave_name, slave->dev->name); info->link = slave->link; - info->state = slave->state; + info->state = bond_slave_state(slave); info->link_failure_count = slave->link_failure_count; break; } @@ -2281,7 +2385,7 @@ static int bond_miimon_inspect(struct bonding *bond) bond->dev->name, (bond->params.mode == BOND_MODE_ACTIVEBACKUP) ? - ((slave->state == BOND_STATE_ACTIVE) ? + (bond_is_active_slave(slave) ? 
"active " : "backup ") : "", slave->dev->name, bond->params.downdelay * bond->params.miimon); @@ -2372,13 +2476,13 @@ static void bond_miimon_commit(struct bonding *bond) if (bond->params.mode == BOND_MODE_8023AD) { /* prevent it from being the active one */ - slave->state = BOND_STATE_BACKUP; + bond_set_backup_slave(slave); } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ - slave->state = BOND_STATE_ACTIVE; + bond_set_active_slave(slave); } else if (slave != bond->primary_slave) { /* prevent it from being the active one */ - slave->state = BOND_STATE_BACKUP; + bond_set_backup_slave(slave); } bond_update_speed_duplex(slave); @@ -2571,11 +2675,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { - int i, vlan_id, rv; + int i, vlan_id; __be32 *targets = bond->params.arp_targets; struct vlan_entry *vlan; struct net_device *vlan_dev; - struct flowi fl; struct rtable *rt; for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { @@ -2594,15 +2697,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) * determine which VLAN interface would be used, so we * can tag the ARP with the proper VLAN tag. */ - memset(&fl, 0, sizeof(fl)); - fl.fl4_dst = targets[i]; - fl.fl4_tos = RTO_ONLINK; - - rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl); - if (rv) { + rt = ip_route_output(dev_net(bond->dev), targets[i], 0, + RTO_ONLINK, 0); + if (IS_ERR(rt)) { if (net_ratelimit()) { pr_warning("%s: no route to arp_ip_target %pI4\n", - bond->dev->name, &fl.fl4_dst); + bond->dev->name, &targets[i]); } continue; } @@ -2638,7 +2738,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) if (net_ratelimit()) { pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", - bond->dev->name, &fl.fl4_dst, + bond->dev->name, &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL"); } ip_rt_put(rt); @@ -2756,7 +2856,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack memcpy(&tip, arp_ptr, 4); pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n", - bond->dev->name, slave->dev->name, slave->state, + bond->dev->name, slave->dev->name, bond_slave_state(slave), bond->params.arp_validate, slave_do_arp_validate(bond, slave), &sip, &tip); @@ -2768,7 +2868,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack * the active, through one switch, the router, then the other * switch before reaching the backup. */ - if (slave->state == BOND_STATE_ACTIVE) + if (bond_is_active_slave(slave)) bond_validate_arp(bond, slave, sip, tip); else bond_validate_arp(bond, slave, tip, sip); @@ -2830,7 +2930,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) slave->dev->last_rx + delta_in_ticks)) { slave->link = BOND_LINK_UP; - slave->state = BOND_STATE_ACTIVE; + bond_set_active_slave(slave); /* primary_slave has no meaning in round-robin * mode. 
the window of a slave being up and @@ -2863,7 +2963,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) slave->dev->last_rx + 2 * delta_in_ticks)) { slave->link = BOND_LINK_DOWN; - slave->state = BOND_STATE_BACKUP; + bond_set_backup_slave(slave); if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; @@ -2957,7 +3057,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) * gives each slave a chance to tx/rx traffic * before being taken out */ - if (slave->state == BOND_STATE_BACKUP && + if (!bond_is_active_slave(slave) && !bond->current_arp_slave && !time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, @@ -2974,7 +3074,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) * the bond has an IP address) */ trans_start = dev_trans_start(slave->dev); - if ((slave->state == BOND_STATE_ACTIVE) && + if (bond_is_active_slave(slave) && (!time_in_range(jiffies, trans_start - delta_in_ticks, trans_start + 2 * delta_in_ticks) || @@ -3182,299 +3282,6 @@ out: read_unlock(&bond->lock); } -/*------------------------------ proc/seq_file-------------------------------*/ - -#ifdef CONFIG_PROC_FS - -static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(RCU) - __acquires(&bond->lock) -{ - struct bonding *bond = seq->private; - loff_t off = 0; - struct slave *slave; - int i; - - /* make sure the bond won't be taken away */ - rcu_read_lock(); - read_lock(&bond->lock); - - if (*pos == 0) - return SEQ_START_TOKEN; - - bond_for_each_slave(bond, slave, i) { - if (++off == *pos) - return slave; - } - - return NULL; -} - -static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) -{ - struct bonding *bond = seq->private; - struct slave *slave = v; - - ++*pos; - if (v == SEQ_START_TOKEN) - return bond->first_slave; - - slave = slave->next; - - return (slave == bond->first_slave) ? NULL : slave; -} - -static void bond_info_seq_stop(struct seq_file *seq, void *v) - __releases(&bond->lock) - __releases(RCU) -{ - struct bonding *bond = seq->private; - - read_unlock(&bond->lock); - rcu_read_unlock(); -} - -static void bond_info_show_master(struct seq_file *seq) -{ - struct bonding *bond = seq->private; - struct slave *curr; - int i; - - read_lock(&bond->curr_slave_lock); - curr = bond->curr_active_slave; - read_unlock(&bond->curr_slave_lock); - - seq_printf(seq, "Bonding Mode: %s", - bond_mode_name(bond->params.mode)); - - if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && - bond->params.fail_over_mac) - seq_printf(seq, " (fail_over_mac %s)", - fail_over_mac_tbl[bond->params.fail_over_mac].modename); - - seq_printf(seq, "\n"); - - if (bond->params.mode == BOND_MODE_XOR || - bond->params.mode == BOND_MODE_8023AD) { - seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", - xmit_hashtype_tbl[bond->params.xmit_policy].modename, - bond->params.xmit_policy); - } - - if (USES_PRIMARY(bond->params.mode)) { - seq_printf(seq, "Primary Slave: %s", - (bond->primary_slave) ? - bond->primary_slave->dev->name : "None"); - if (bond->primary_slave) - seq_printf(seq, " (primary_reselect %s)", - pri_reselect_tbl[bond->params.primary_reselect].modename); - - seq_printf(seq, "\nCurrently Active Slave: %s\n", - (curr) ? curr->dev->name : "None"); - } - - seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ? 
- "up" : "down"); - seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon); - seq_printf(seq, "Up Delay (ms): %d\n", - bond->params.updelay * bond->params.miimon); - seq_printf(seq, "Down Delay (ms): %d\n", - bond->params.downdelay * bond->params.miimon); - - - /* ARP information */ - if (bond->params.arp_interval > 0) { - int printed = 0; - seq_printf(seq, "ARP Polling Interval (ms): %d\n", - bond->params.arp_interval); - - seq_printf(seq, "ARP IP target/s (n.n.n.n form):"); - - for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { - if (!bond->params.arp_targets[i]) - break; - if (printed) - seq_printf(seq, ","); - seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); - printed = 1; - } - seq_printf(seq, "\n"); - } - - if (bond->params.mode == BOND_MODE_8023AD) { - struct ad_info ad_info; - - seq_puts(seq, "\n802.3ad info\n"); - seq_printf(seq, "LACP rate: %s\n", - (bond->params.lacp_fast) ? "fast" : "slow"); - seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", - ad_select_tbl[bond->params.ad_select].modename); - - if (bond_3ad_get_active_agg_info(bond, &ad_info)) { - seq_printf(seq, "bond %s has no active aggregator\n", - bond->dev->name); - } else { - seq_printf(seq, "Active Aggregator Info:\n"); - - seq_printf(seq, "\tAggregator ID: %d\n", - ad_info.aggregator_id); - seq_printf(seq, "\tNumber of ports: %d\n", - ad_info.ports); - seq_printf(seq, "\tActor Key: %d\n", - ad_info.actor_key); - seq_printf(seq, "\tPartner Key: %d\n", - ad_info.partner_key); - seq_printf(seq, "\tPartner Mac Address: %pM\n", - ad_info.partner_system); - } - } -} - -static void bond_info_show_slave(struct seq_file *seq, - const struct slave *slave) -{ - struct bonding *bond = seq->private; - - seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); - seq_printf(seq, "MII Status: %s\n", - (slave->link == BOND_LINK_UP) ? "up" : "down"); - seq_printf(seq, "Speed: %d Mbps\n", slave->speed); - seq_printf(seq, "Duplex: %s\n", slave->duplex ? 
"full" : "half"); - seq_printf(seq, "Link Failure Count: %u\n", - slave->link_failure_count); - - seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); - - if (bond->params.mode == BOND_MODE_8023AD) { - const struct aggregator *agg - = SLAVE_AD_INFO(slave).port.aggregator; - - if (agg) - seq_printf(seq, "Aggregator ID: %d\n", - agg->aggregator_identifier); - else - seq_puts(seq, "Aggregator ID: N/A\n"); - } - seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); -} - -static int bond_info_seq_show(struct seq_file *seq, void *v) -{ - if (v == SEQ_START_TOKEN) { - seq_printf(seq, "%s\n", version); - bond_info_show_master(seq); - } else - bond_info_show_slave(seq, v); - - return 0; -} - -static const struct seq_operations bond_info_seq_ops = { - .start = bond_info_seq_start, - .next = bond_info_seq_next, - .stop = bond_info_seq_stop, - .show = bond_info_seq_show, -}; - -static int bond_info_open(struct inode *inode, struct file *file) -{ - struct seq_file *seq; - struct proc_dir_entry *proc; - int res; - - res = seq_open(file, &bond_info_seq_ops); - if (!res) { - /* recover the pointer buried in proc_dir_entry data */ - seq = file->private_data; - proc = PDE(inode); - seq->private = proc->data; - } - - return res; -} - -static const struct file_operations bond_info_fops = { - .owner = THIS_MODULE, - .open = bond_info_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static void bond_create_proc_entry(struct bonding *bond) -{ - struct net_device *bond_dev = bond->dev; - struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); - - if (bn->proc_dir) { - bond->proc_entry = proc_create_data(bond_dev->name, - S_IRUGO, bn->proc_dir, - &bond_info_fops, bond); - if (bond->proc_entry == NULL) - pr_warning("Warning: Cannot create /proc/net/%s/%s\n", - DRV_NAME, bond_dev->name); - else - memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); - } -} - -static void bond_remove_proc_entry(struct bonding *bond) -{ - struct net_device *bond_dev = bond->dev; - struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); - - if (bn->proc_dir && bond->proc_entry) { - remove_proc_entry(bond->proc_file_name, bn->proc_dir); - memset(bond->proc_file_name, 0, IFNAMSIZ); - bond->proc_entry = NULL; - } -} - -/* Create the bonding directory under /proc/net, if doesn't exist yet. - * Caller must hold rtnl_lock. - */ -static void __net_init bond_create_proc_dir(struct bond_net *bn) -{ - if (!bn->proc_dir) { - bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); - if (!bn->proc_dir) - pr_warning("Warning: cannot create /proc/net/%s\n", - DRV_NAME); - } -} - -/* Destroy the bonding directory under /proc/net, if empty. - * Caller must hold rtnl_lock. 
- */ -static void __net_exit bond_destroy_proc_dir(struct bond_net *bn) -{ - if (bn->proc_dir) { - remove_proc_entry(DRV_NAME, bn->net->proc_net); - bn->proc_dir = NULL; - } -} - -#else /* !CONFIG_PROC_FS */ - -static void bond_create_proc_entry(struct bonding *bond) -{ -} - -static void bond_remove_proc_entry(struct bonding *bond) -{ -} - -static inline void bond_create_proc_dir(struct bond_net *bn) -{ -} - -static inline void bond_destroy_proc_dir(struct bond_net *bn) -{ -} - -#endif /* CONFIG_PROC_FS */ - - /*-------------------------- netdev event handling --------------------------*/ /* @@ -4331,7 +4138,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev bond_for_each_slave_from(bond, slave, i, start_at) { if (IS_UP(slave->dev) && (slave->link == BOND_LINK_UP) && - (slave->state == BOND_STATE_ACTIVE)) { + bond_is_active_slave(slave)) { res = bond_dev_queue_xmit(bond, skb, slave->dev); break; } @@ -4408,7 +4215,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev) bond_for_each_slave_from(bond, slave, i, start_at) { if (IS_UP(slave->dev) && (slave->link == BOND_LINK_UP) && - (slave->state == BOND_STATE_ACTIVE)) { + bond_is_active_slave(slave)) { res = bond_dev_queue_xmit(bond, skb, slave->dev); break; } @@ -4449,7 +4256,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev) bond_for_each_slave_from(bond, slave, i, start_at) { if (IS_UP(slave->dev) && (slave->link == BOND_LINK_UP) && - (slave->state == BOND_STATE_ACTIVE)) { + bond_is_active_slave(slave)) { if (tx_dev) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) { @@ -4537,11 +4344,18 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) { /* * This helper function exists to help dev_pick_tx get the correct - * destination queue. Using a helper function skips the a call to + * destination queue. Using a helper function skips a call to * skb_tx_hash and will put the skbs in the queue we expect on their * way down to the bonding driver. */ - return skb->queue_mapping; + u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; + + if (unlikely(txq >= dev->real_num_tx_queues)) { + do + txq -= dev->real_num_tx_queues; + while (txq >= dev->real_num_tx_queues); + } + return txq; } static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -4603,11 +4417,9 @@ void bond_set_mode_ops(struct bonding *bond, int mode) case BOND_MODE_BROADCAST: break; case BOND_MODE_8023AD: - bond_set_master_3ad_flags(bond); bond_set_xmit_hash_policy(bond); break; case BOND_MODE_ALB: - bond_set_master_alb_flags(bond); /* FALLTHRU */ case BOND_MODE_TLB: break; @@ -4654,9 +4466,12 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_netpoll_setup = bond_netpoll_setup, .ndo_netpoll_cleanup = bond_netpoll_cleanup, .ndo_poll_controller = bond_poll_controller, #endif + .ndo_add_slave = bond_enslave, + .ndo_del_slave = bond_release, }; static void bond_destructor(struct net_device *bond_dev) @@ -4695,9 +4510,6 @@ static void bond_setup(struct net_device *bond_dev) bond_dev->priv_flags |= IFF_BONDING; bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; - if (bond->params.arp_interval) - bond_dev->priv_flags |= IFF_MASTER_ARPMON; - /* At first, we block adding VLANs. That's the only way to * prevent problems that occur when adding VLANs over an * empty bond. 
The block will be removed once non-challenged @@ -5166,8 +4978,6 @@ static int bond_init(struct net_device *bond_dev) bond_set_lockdep_class(bond_dev); - netif_carrier_off(bond_dev); - bond_create_proc_entry(bond); list_add_tail(&bond->bond_list, &bn->dev_list); @@ -5237,6 +5047,8 @@ int bond_create(struct net *net, const char *name) res = register_netdevice(bond_dev); + netif_carrier_off(bond_dev); + out: rtnl_unlock(); if (res < 0) @@ -5275,7 +5087,7 @@ static int __init bonding_init(void) int i; int res; - pr_info("%s", version); + pr_info("%s", bond_version); res = bond_check_params(&bonding_defaults); if (res) diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c new file mode 100644 index 000000000000..c32ff55a34c1 --- /dev/null +++ b/drivers/net/bonding/bond_procfs.c @@ -0,0 +1,275 @@ +#include <linux/proc_fs.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> +#include "bonding.h" + + +extern const char *bond_mode_name(int mode); + +static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) + __acquires(RCU) + __acquires(&bond->lock) +{ + struct bonding *bond = seq->private; + loff_t off = 0; + struct slave *slave; + int i; + + /* make sure the bond won't be taken away */ + rcu_read_lock(); + read_lock(&bond->lock); + + if (*pos == 0) + return SEQ_START_TOKEN; + + bond_for_each_slave(bond, slave, i) { + if (++off == *pos) + return slave; + } + + return NULL; +} + +static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bonding *bond = seq->private; + struct slave *slave = v; + + ++*pos; + if (v == SEQ_START_TOKEN) + return bond->first_slave; + + slave = slave->next; + + return (slave == bond->first_slave) ? NULL : slave; +} + +static void bond_info_seq_stop(struct seq_file *seq, void *v) + __releases(&bond->lock) + __releases(RCU) +{ + struct bonding *bond = seq->private; + + read_unlock(&bond->lock); + rcu_read_unlock(); +} + +static void bond_info_show_master(struct seq_file *seq) +{ + struct bonding *bond = seq->private; + struct slave *curr; + int i; + + read_lock(&bond->curr_slave_lock); + curr = bond->curr_active_slave; + read_unlock(&bond->curr_slave_lock); + + seq_printf(seq, "Bonding Mode: %s", + bond_mode_name(bond->params.mode)); + + if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && + bond->params.fail_over_mac) + seq_printf(seq, " (fail_over_mac %s)", + fail_over_mac_tbl[bond->params.fail_over_mac].modename); + + seq_printf(seq, "\n"); + + if (bond->params.mode == BOND_MODE_XOR || + bond->params.mode == BOND_MODE_8023AD) { + seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", + xmit_hashtype_tbl[bond->params.xmit_policy].modename, + bond->params.xmit_policy); + } + + if (USES_PRIMARY(bond->params.mode)) { + seq_printf(seq, "Primary Slave: %s", + (bond->primary_slave) ? + bond->primary_slave->dev->name : "None"); + if (bond->primary_slave) + seq_printf(seq, " (primary_reselect %s)", + pri_reselect_tbl[bond->params.primary_reselect].modename); + + seq_printf(seq, "\nCurrently Active Slave: %s\n", + (curr) ? curr->dev->name : "None"); + } + + seq_printf(seq, "MII Status: %s\n", netif_carrier_ok(bond->dev) ? 
+ "up" : "down"); + seq_printf(seq, "MII Polling Interval (ms): %d\n", bond->params.miimon); + seq_printf(seq, "Up Delay (ms): %d\n", + bond->params.updelay * bond->params.miimon); + seq_printf(seq, "Down Delay (ms): %d\n", + bond->params.downdelay * bond->params.miimon); + + + /* ARP information */ + if (bond->params.arp_interval > 0) { + int printed = 0; + seq_printf(seq, "ARP Polling Interval (ms): %d\n", + bond->params.arp_interval); + + seq_printf(seq, "ARP IP target/s (n.n.n.n form):"); + + for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { + if (!bond->params.arp_targets[i]) + break; + if (printed) + seq_printf(seq, ","); + seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); + printed = 1; + } + seq_printf(seq, "\n"); + } + + if (bond->params.mode == BOND_MODE_8023AD) { + struct ad_info ad_info; + + seq_puts(seq, "\n802.3ad info\n"); + seq_printf(seq, "LACP rate: %s\n", + (bond->params.lacp_fast) ? "fast" : "slow"); + seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", + ad_select_tbl[bond->params.ad_select].modename); + + if (bond_3ad_get_active_agg_info(bond, &ad_info)) { + seq_printf(seq, "bond %s has no active aggregator\n", + bond->dev->name); + } else { + seq_printf(seq, "Active Aggregator Info:\n"); + + seq_printf(seq, "\tAggregator ID: %d\n", + ad_info.aggregator_id); + seq_printf(seq, "\tNumber of ports: %d\n", + ad_info.ports); + seq_printf(seq, "\tActor Key: %d\n", + ad_info.actor_key); + seq_printf(seq, "\tPartner Key: %d\n", + ad_info.partner_key); + seq_printf(seq, "\tPartner Mac Address: %pM\n", + ad_info.partner_system); + } + } +} + +static void bond_info_show_slave(struct seq_file *seq, + const struct slave *slave) +{ + struct bonding *bond = seq->private; + + seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name); + seq_printf(seq, "MII Status: %s\n", + (slave->link == BOND_LINK_UP) ? "up" : "down"); + seq_printf(seq, "Speed: %d Mbps\n", slave->speed); + seq_printf(seq, "Duplex: %s\n", slave->duplex ? 
"full" : "half"); + seq_printf(seq, "Link Failure Count: %u\n", + slave->link_failure_count); + + seq_printf(seq, "Permanent HW addr: %pM\n", slave->perm_hwaddr); + + if (bond->params.mode == BOND_MODE_8023AD) { + const struct aggregator *agg + = SLAVE_AD_INFO(slave).port.aggregator; + + if (agg) + seq_printf(seq, "Aggregator ID: %d\n", + agg->aggregator_identifier); + else + seq_puts(seq, "Aggregator ID: N/A\n"); + } + seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id); +} + +static int bond_info_seq_show(struct seq_file *seq, void *v) +{ + if (v == SEQ_START_TOKEN) { + seq_printf(seq, "%s\n", bond_version); + bond_info_show_master(seq); + } else + bond_info_show_slave(seq, v); + + return 0; +} + +static const struct seq_operations bond_info_seq_ops = { + .start = bond_info_seq_start, + .next = bond_info_seq_next, + .stop = bond_info_seq_stop, + .show = bond_info_seq_show, +}; + +static int bond_info_open(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + struct proc_dir_entry *proc; + int res; + + res = seq_open(file, &bond_info_seq_ops); + if (!res) { + /* recover the pointer buried in proc_dir_entry data */ + seq = file->private_data; + proc = PDE(inode); + seq->private = proc->data; + } + + return res; +} + +static const struct file_operations bond_info_fops = { + .owner = THIS_MODULE, + .open = bond_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +void bond_create_proc_entry(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); + + if (bn->proc_dir) { + bond->proc_entry = proc_create_data(bond_dev->name, + S_IRUGO, bn->proc_dir, + &bond_info_fops, bond); + if (bond->proc_entry == NULL) + pr_warning("Warning: Cannot create /proc/net/%s/%s\n", + DRV_NAME, bond_dev->name); + else + memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); + } +} + +void bond_remove_proc_entry(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); + + if (bn->proc_dir && bond->proc_entry) { + remove_proc_entry(bond->proc_file_name, bn->proc_dir); + memset(bond->proc_file_name, 0, IFNAMSIZ); + bond->proc_entry = NULL; + } +} + +/* Create the bonding directory under /proc/net, if doesn't exist yet. + * Caller must hold rtnl_lock. + */ +void __net_init bond_create_proc_dir(struct bond_net *bn) +{ + if (!bn->proc_dir) { + bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net); + if (!bn->proc_dir) + pr_warning("Warning: cannot create /proc/net/%s\n", + DRV_NAME); + } +} + +/* Destroy the bonding directory under /proc/net, if empty. + * Caller must hold rtnl_lock. 
+ */ +void __net_exit bond_destroy_proc_dir(struct bond_net *bn) +{ + if (bn->proc_dir) { + remove_proc_entry(DRV_NAME, bn->net->proc_net); + bn->proc_dir = NULL; + } +} diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 8fd0174c5380..de87aea6d01a 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -118,7 +118,10 @@ static ssize_t bonding_store_bonds(struct class *cls, pr_info("%s is being created...\n", ifname); rv = bond_create(net, ifname); if (rv) { - pr_info("Bond creation failed.\n"); + if (rv == -EEXIST) + pr_info("%s already exists.\n", ifname); + else + pr_info("%s creation failed.\n", ifname); res = rv; } } else if (command[0] == '-') { @@ -322,11 +325,6 @@ static ssize_t bonding_store_mode(struct device *d, ret = -EINVAL; goto out; } - if (bond->params.mode == BOND_MODE_8023AD) - bond_unset_master_3ad_flags(bond); - - if (bond->params.mode == BOND_MODE_ALB) - bond_unset_master_alb_flags(bond); bond->params.mode = new_value; bond_set_mode_ops(bond, bond->params.mode); @@ -527,8 +525,6 @@ static ssize_t bonding_store_arp_interval(struct device *d, pr_info("%s: Setting ARP monitoring interval to %d.\n", bond->dev->name, new_value); bond->params.arp_interval = new_value; - if (bond->params.arp_interval) - bond->dev->priv_flags |= IFF_MASTER_ARPMON; if (bond->params.miimon) { pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", bond->dev->name, bond->dev->name); @@ -1004,7 +1000,6 @@ static ssize_t bonding_store_miimon(struct device *d, pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", bond->dev->name); bond->params.arp_interval = 0; - bond->dev->priv_flags &= ~IFF_MASTER_ARPMON; if (bond->params.arp_validate) { bond_unregister_arp(bond); bond->params.arp_validate = @@ -1198,7 +1193,7 @@ static ssize_t bonding_store_carrier(struct device *d, bond->dev->name, new_value); } out: - return count; + return ret; } static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier); @@ -1587,15 +1582,15 @@ static ssize_t bonding_store_slaves_active(struct device *d, } bond_for_each_slave(bond, slave, i) { - if (slave->state == BOND_STATE_BACKUP) { + if (!bond_is_active_slave(slave)) { if (new_value) - slave->dev->priv_flags &= ~IFF_SLAVE_INACTIVE; + slave->inactive = 0; else - slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; + slave->inactive = 1; } } out: - return count; + return ret; } static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, bonding_show_slaves_active, bonding_store_slaves_active); diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 31fe980e4e28..6b26962fd0ec 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -20,6 +20,7 @@ #include <linux/if_bonding.h> #include <linux/cpumask.h> #include <linux/in6.h> +#include <linux/netpoll.h> #include "bond_3ad.h" #include "bond_alb.h" @@ -28,6 +29,8 @@ #define DRV_NAME "bonding" #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" +#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" + #define BOND_MAX_ARP_TARGETS 16 #define IS_UP(dev) \ @@ -52,7 +55,7 @@ (((slave)->dev->flags & IFF_UP) && \ netif_running((slave)->dev) && \ ((slave)->link == BOND_LINK_UP) && \ - ((slave)->state == BOND_STATE_ACTIVE)) + bond_is_active_slave(slave)) #define USES_PRIMARY(mode) \ @@ -132,7 +135,7 @@ static inline void unblock_netpoll_tx(void) static inline int 
is_netpoll_tx_blocked(struct net_device *dev) { - if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) + if (unlikely(netpoll_tx_running(dev))) return atomic_read(&netpoll_block_tx); return 0; } @@ -189,7 +192,9 @@ struct slave { unsigned long last_arp_rx; s8 link; /* one of BOND_LINK_XXXX */ s8 new_link; - s8 state; /* one of BOND_STATE_XXXX */ + u8 backup:1, /* indicates backup slave. Value corresponds with + BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ + inactive:1; /* indicates inactive slave */ u32 original_mtu; u32 link_failure_count; u8 perm_hwaddr[ETH_ALEN]; @@ -198,6 +203,9 @@ struct slave { u16 queue_id; struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ struct tlb_slave_info tlb_info; +#ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *np; +#endif }; /* @@ -260,12 +268,16 @@ struct bonding { #endif /* CONFIG_DEBUG_FS */ }; +#define bond_slave_get_rcu(dev) \ + ((struct slave *) rcu_dereference(dev->rx_handler_data)) + /** * Returns NULL if the net_device does not belong to any of the bond's slaves * * Caller must hold bond lock for read */ -static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct net_device *slave_dev) +static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, + struct net_device *slave_dev) { struct slave *slave = NULL; int i; @@ -276,7 +288,7 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n } } - return 0; + return NULL; } static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) @@ -294,6 +306,26 @@ static inline bool bond_is_lb(const struct bonding *bond) bond->params.mode == BOND_MODE_ALB); } +static inline void bond_set_active_slave(struct slave *slave) +{ + slave->backup = 0; +} + +static inline void bond_set_backup_slave(struct slave *slave) +{ + slave->backup = 1; +} + +static inline int bond_slave_state(struct slave *slave) +{ + return slave->backup; +} + +static inline bool bond_is_active_slave(struct slave *slave) +{ + return !bond_slave_state(slave); +} + #define BOND_PRI_RESELECT_ALWAYS 0 #define BOND_PRI_RESELECT_BETTER 1 #define BOND_PRI_RESELECT_FAILURE 2 @@ -311,7 +343,7 @@ static inline bool bond_is_lb(const struct bonding *bond) static inline int slave_do_arp_validate(struct bonding *bond, struct slave *slave) { - return bond->params.arp_validate & (1 << slave->state); + return bond->params.arp_validate & (1 << bond_slave_state(slave)); } static inline unsigned long slave_last_rx(struct bonding *bond, @@ -323,41 +355,40 @@ static inline unsigned long slave_last_rx(struct bonding *bond, return slave->dev->last_rx; } -static inline void bond_set_slave_inactive_flags(struct slave *slave) +#ifdef CONFIG_NET_POLL_CONTROLLER +static inline void bond_netpoll_send_skb(const struct slave *slave, + struct sk_buff *skb) { - struct bonding *bond = netdev_priv(slave->dev->master); - if (!bond_is_lb(bond)) - slave->state = BOND_STATE_BACKUP; - if (!bond->params.all_slaves_active) - slave->dev->priv_flags |= IFF_SLAVE_INACTIVE; - if (slave_do_arp_validate(bond, slave)) - slave->dev->priv_flags |= IFF_SLAVE_NEEDARP; -} + struct netpoll *np = slave->np; -static inline void bond_set_slave_active_flags(struct slave *slave) -{ - slave->state = BOND_STATE_ACTIVE; - slave->dev->priv_flags &= ~(IFF_SLAVE_INACTIVE | IFF_SLAVE_NEEDARP); + if (np) + netpoll_send_skb(np, skb); } - -static inline void bond_set_master_3ad_flags(struct bonding *bond) +#else +static inline void bond_netpoll_send_skb(const struct slave *slave, + struct sk_buff *skb) { - bond->dev->priv_flags 
|= IFF_MASTER_8023AD; } +#endif -static inline void bond_unset_master_3ad_flags(struct bonding *bond) +static inline void bond_set_slave_inactive_flags(struct slave *slave) { - bond->dev->priv_flags &= ~IFF_MASTER_8023AD; + struct bonding *bond = netdev_priv(slave->dev->master); + if (!bond_is_lb(bond)) + bond_set_backup_slave(slave); + if (!bond->params.all_slaves_active) + slave->inactive = 1; } -static inline void bond_set_master_alb_flags(struct bonding *bond) +static inline void bond_set_slave_active_flags(struct slave *slave) { - bond->dev->priv_flags |= IFF_MASTER_ALB; + bond_set_active_slave(slave); + slave->inactive = 0; } -static inline void bond_unset_master_alb_flags(struct bonding *bond) +static inline bool bond_is_slave_inactive(struct slave *slave) { - bond->dev->priv_flags &= ~IFF_MASTER_ALB; + return slave->inactive; } struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); @@ -393,6 +424,30 @@ struct bond_net { #endif }; +#ifdef CONFIG_PROC_FS +void bond_create_proc_entry(struct bonding *bond); +void bond_remove_proc_entry(struct bonding *bond); +void bond_create_proc_dir(struct bond_net *bn); +void bond_destroy_proc_dir(struct bond_net *bn); +#else +static inline void bond_create_proc_entry(struct bonding *bond) +{ +} + +static inline void bond_remove_proc_entry(struct bonding *bond) +{ +} + +static inline void bond_create_proc_dir(struct bond_net *bn) +{ +} + +static inline void bond_destroy_proc_dir(struct bond_net *bn) +{ +} +#endif + + /* exported from bond_main.c */ extern int bond_net_id; extern const struct bond_parm_tbl bond_lacp_tbl[]; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 5dec456fd4a4..1d699e3df547 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -115,6 +115,8 @@ source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/sja1000/Kconfig" +source "drivers/net/can/c_can/Kconfig" + source "drivers/net/can/usb/Kconfig" source "drivers/net/can/softing/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 53c82a71778e..24ebfe8d758a 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -13,6 +13,7 @@ obj-y += softing/ obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_MSCAN) += mscan/ +obj-$(CONFIG_CAN_C_CAN) += c_can/ obj-$(CONFIG_CAN_AT91) += at91_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_MCP251X) += mcp251x.o diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig new file mode 100644 index 000000000000..ffb9773d102d --- /dev/null +++ b/drivers/net/can/c_can/Kconfig @@ -0,0 +1,15 @@ +menuconfig CAN_C_CAN + tristate "Bosch C_CAN devices" + depends on CAN_DEV && HAS_IOMEM + +if CAN_C_CAN + +config CAN_C_CAN_PLATFORM + tristate "Generic Platform Bus based C_CAN driver" + ---help--- + This driver adds support for the C_CAN chips connected to + the "platform bus" (Linux abstraction for directly to the + processor attached devices) which can be found on various + boards from ST Microelectronics (http://www.st.com) + like the SPEAr1310 and SPEAr320 evaluation boards. +endif diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile new file mode 100644 index 000000000000..9273f6d5c4b7 --- /dev/null +++ b/drivers/net/can/c_can/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for the Bosch C_CAN controller drivers. 
+# + +obj-$(CONFIG_CAN_C_CAN) += c_can.o +obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o + +ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c new file mode 100644 index 000000000000..14050786218a --- /dev/null +++ b/drivers/net/can/c_can/c_can.c @@ -0,0 +1,1158 @@ +/* + * CAN bus driver for Bosch C_CAN controller + * + * Copyright (C) 2010 ST Microelectronics + * Bhupesh Sharma <bhupesh.sharma@st.com> + * + * Borrowed heavily from the C_CAN driver originally written by: + * Copyright (C) 2007 + * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de> + * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch> + * + * TX and RX NAPI implementation has been borrowed from at91 CAN driver + * written by: + * Copyright + * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> + * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de> + * + * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B. + * Bosch C_CAN user manual can be obtained from: + * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/ + * users_manual_c_can.pdf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/kernel.h> +#include <linux/version.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/if_arp.h> +#include <linux/if_ether.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/io.h> + +#include <linux/can.h> +#include <linux/can/dev.h> +#include <linux/can/error.h> + +#include "c_can.h" + +/* control register */ +#define CONTROL_TEST BIT(7) +#define CONTROL_CCE BIT(6) +#define CONTROL_DISABLE_AR BIT(5) +#define CONTROL_ENABLE_AR (0 << 5) +#define CONTROL_EIE BIT(3) +#define CONTROL_SIE BIT(2) +#define CONTROL_IE BIT(1) +#define CONTROL_INIT BIT(0) + +/* test register */ +#define TEST_RX BIT(7) +#define TEST_TX1 BIT(6) +#define TEST_TX2 BIT(5) +#define TEST_LBACK BIT(4) +#define TEST_SILENT BIT(3) +#define TEST_BASIC BIT(2) + +/* status register */ +#define STATUS_BOFF BIT(7) +#define STATUS_EWARN BIT(6) +#define STATUS_EPASS BIT(5) +#define STATUS_RXOK BIT(4) +#define STATUS_TXOK BIT(3) + +/* error counter register */ +#define ERR_CNT_TEC_MASK 0xff +#define ERR_CNT_TEC_SHIFT 0 +#define ERR_CNT_REC_SHIFT 8 +#define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT) +#define ERR_CNT_RP_SHIFT 15 +#define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT) + +/* bit-timing register */ +#define BTR_BRP_MASK 0x3f +#define BTR_BRP_SHIFT 0 +#define BTR_SJW_SHIFT 6 +#define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT) +#define BTR_TSEG1_SHIFT 8 +#define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT) +#define BTR_TSEG2_SHIFT 12 +#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) + +/* brp extension register */ +#define BRP_EXT_BRPE_MASK 0x0f +#define BRP_EXT_BRPE_SHIFT 0 + +/* IFx command request */ +#define IF_COMR_BUSY BIT(15) + +/* IFx command mask */ +#define IF_COMM_WR BIT(7) +#define IF_COMM_MASK BIT(6) +#define IF_COMM_ARB BIT(5) +#define IF_COMM_CONTROL BIT(4) +#define IF_COMM_CLR_INT_PND BIT(3) +#define IF_COMM_TXRQST BIT(2) +#define IF_COMM_DATAA BIT(1) +#define IF_COMM_DATAB BIT(0) +#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \ + IF_COMM_CONTROL | IF_COMM_TXRQST | \ + IF_COMM_DATAA | IF_COMM_DATAB) + +/* IFx arbitration */ +#define IF_ARB_MSGVAL BIT(15) +#define 
IF_ARB_MSGXTD BIT(14) +#define IF_ARB_TRANSMIT BIT(13) + +/* IFx message control */ +#define IF_MCONT_NEWDAT BIT(15) +#define IF_MCONT_MSGLST BIT(14) +#define IF_MCONT_CLR_MSGLST (0 << 14) +#define IF_MCONT_INTPND BIT(13) +#define IF_MCONT_UMASK BIT(12) +#define IF_MCONT_TXIE BIT(11) +#define IF_MCONT_RXIE BIT(10) +#define IF_MCONT_RMTEN BIT(9) +#define IF_MCONT_TXRQST BIT(8) +#define IF_MCONT_EOB BIT(7) +#define IF_MCONT_DLC_MASK 0xf + +/* + * IFx register masks: + * allow easy operation on 16-bit registers when the + * argument is 32-bit instead + */ +#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF) +#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16) + +/* message object split */ +#define C_CAN_NO_OF_OBJECTS 32 +#define C_CAN_MSG_OBJ_RX_NUM 16 +#define C_CAN_MSG_OBJ_TX_NUM 16 + +#define C_CAN_MSG_OBJ_RX_FIRST 1 +#define C_CAN_MSG_OBJ_RX_LAST (C_CAN_MSG_OBJ_RX_FIRST + \ + C_CAN_MSG_OBJ_RX_NUM - 1) + +#define C_CAN_MSG_OBJ_TX_FIRST (C_CAN_MSG_OBJ_RX_LAST + 1) +#define C_CAN_MSG_OBJ_TX_LAST (C_CAN_MSG_OBJ_TX_FIRST + \ + C_CAN_MSG_OBJ_TX_NUM - 1) + +#define C_CAN_MSG_OBJ_RX_SPLIT 9 +#define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) + +#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1) +#define RECEIVE_OBJECT_BITS 0x0000ffff + +/* status interrupt */ +#define STATUS_INTERRUPT 0x8000 + +/* global interrupt masks */ +#define ENABLE_ALL_INTERRUPTS 1 +#define DISABLE_ALL_INTERRUPTS 0 + +/* minimum timeout for checking BUSY status */ +#define MIN_TIMEOUT_VALUE 6 + +/* napi related */ +#define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM + +/* c_can lec values */ +enum c_can_lec_type { + LEC_NO_ERROR = 0, + LEC_STUFF_ERROR, + LEC_FORM_ERROR, + LEC_ACK_ERROR, + LEC_BIT1_ERROR, + LEC_BIT0_ERROR, + LEC_CRC_ERROR, + LEC_UNUSED, +}; + +/* + * c_can error types: + * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported + */ +enum c_can_bus_error_types { + C_CAN_NO_ERROR = 0, + C_CAN_BUS_OFF, + C_CAN_ERROR_WARNING, + C_CAN_ERROR_PASSIVE, +}; + +static struct can_bittiming_const c_can_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/ + .brp_inc = 1, +}; + +static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) +{ + return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) + + C_CAN_MSG_OBJ_TX_FIRST; +} + +static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv) +{ + return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) + + C_CAN_MSG_OBJ_TX_FIRST; +} + +static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg) +{ + u32 val = priv->read_reg(priv, reg); + val |= ((u32) priv->read_reg(priv, reg + 2)) << 16; + return val; +} + +static void c_can_enable_all_interrupts(struct c_can_priv *priv, + int enable) +{ + unsigned int cntrl_save = priv->read_reg(priv, + &priv->regs->control); + + if (enable) + cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE); + else + cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE); + + priv->write_reg(priv, &priv->regs->control, cntrl_save); +} + +static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface) +{ + int count = MIN_TIMEOUT_VALUE; + + while (count && priv->read_reg(priv, + &priv->regs->ifregs[iface].com_req) & + IF_COMR_BUSY) { + count--; + udelay(1); + } + + if (!count) + return 1; + + return 0; +} + +static inline void c_can_object_get(struct net_device *dev, + int 
iface, int objno, int mask) +{ + struct c_can_priv *priv = netdev_priv(dev); + + /* + * As per specs, after writting the message object number in the + * IF command request register the transfer b/w interface + * register and message RAM must be complete in 6 CAN-CLK + * period. + */ + priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask, + IFX_WRITE_LOW_16BIT(mask)); + priv->write_reg(priv, &priv->regs->ifregs[iface].com_req, + IFX_WRITE_LOW_16BIT(objno)); + + if (c_can_msg_obj_is_busy(priv, iface)) + netdev_err(dev, "timed out in object get\n"); +} + +static inline void c_can_object_put(struct net_device *dev, + int iface, int objno, int mask) +{ + struct c_can_priv *priv = netdev_priv(dev); + + /* + * As per specs, after writting the message object number in the + * IF command request register the transfer b/w interface + * register and message RAM must be complete in 6 CAN-CLK + * period. + */ + priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask, + (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask))); + priv->write_reg(priv, &priv->regs->ifregs[iface].com_req, + IFX_WRITE_LOW_16BIT(objno)); + + if (c_can_msg_obj_is_busy(priv, iface)) + netdev_err(dev, "timed out in object put\n"); +} + +static void c_can_write_msg_object(struct net_device *dev, + int iface, struct can_frame *frame, int objno) +{ + int i; + u16 flags = 0; + unsigned int id; + struct c_can_priv *priv = netdev_priv(dev); + + if (!(frame->can_id & CAN_RTR_FLAG)) + flags |= IF_ARB_TRANSMIT; + + if (frame->can_id & CAN_EFF_FLAG) { + id = frame->can_id & CAN_EFF_MASK; + flags |= IF_ARB_MSGXTD; + } else + id = ((frame->can_id & CAN_SFF_MASK) << 18); + + flags |= IF_ARB_MSGVAL; + + priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, + IFX_WRITE_LOW_16BIT(id)); + priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags | + IFX_WRITE_HIGH_16BIT(id)); + + for (i = 0; i < frame->can_dlc; i += 2) { + priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2], + frame->data[i] | (frame->data[i + 1] << 8)); + } + + /* enable interrupt for this message object */ + priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, + IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB | + frame->can_dlc); + c_can_object_put(dev, iface, objno, IF_COMM_ALL); +} + +static inline void c_can_mark_rx_msg_obj(struct net_device *dev, + int iface, int ctrl_mask, + int obj) +{ + struct c_can_priv *priv = netdev_priv(dev); + + priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, + ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND)); + c_can_object_put(dev, iface, obj, IF_COMM_CONTROL); + +} + +static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev, + int iface, + int ctrl_mask) +{ + int i; + struct c_can_priv *priv = netdev_priv(dev); + + for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) { + priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, + ctrl_mask & ~(IF_MCONT_MSGLST | + IF_MCONT_INTPND | IF_MCONT_NEWDAT)); + c_can_object_put(dev, iface, i, IF_COMM_CONTROL); + } +} + +static inline void c_can_activate_rx_msg_obj(struct net_device *dev, + int iface, int ctrl_mask, + int obj) +{ + struct c_can_priv *priv = netdev_priv(dev); + + priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, + ctrl_mask & ~(IF_MCONT_MSGLST | + IF_MCONT_INTPND | IF_MCONT_NEWDAT)); + c_can_object_put(dev, iface, obj, IF_COMM_CONTROL); +} + +static void c_can_handle_lost_msg_obj(struct net_device *dev, + int iface, int objno) +{ + struct c_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = 
&dev->stats; + struct sk_buff *skb; + struct can_frame *frame; + + netdev_err(dev, "msg lost in buffer %d\n", objno); + + c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); + + priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, + IF_MCONT_CLR_MSGLST); + + c_can_object_put(dev, 0, objno, IF_COMM_CONTROL); + + /* create an error msg */ + skb = alloc_can_err_skb(dev, &frame); + if (unlikely(!skb)) + return; + + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + stats->rx_errors++; + stats->rx_over_errors++; + + netif_receive_skb(skb); +} + +static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl) +{ + u16 flags, data; + int i; + unsigned int val; + struct c_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct sk_buff *skb; + struct can_frame *frame; + + skb = alloc_can_skb(dev, &frame); + if (!skb) { + stats->rx_dropped++; + return -ENOMEM; + } + + frame->can_dlc = get_can_dlc(ctrl & 0x0F); + + flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2); + val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) | + (flags << 16); + + if (flags & IF_ARB_MSGXTD) + frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + frame->can_id = (val >> 18) & CAN_SFF_MASK; + + if (flags & IF_ARB_TRANSMIT) + frame->can_id |= CAN_RTR_FLAG; + else { + for (i = 0; i < frame->can_dlc; i += 2) { + data = priv->read_reg(priv, + &priv->regs->ifregs[iface].data[i / 2]); + frame->data[i] = data; + frame->data[i + 1] = data >> 8; + } + } + + netif_receive_skb(skb); + + stats->rx_packets++; + stats->rx_bytes += frame->can_dlc; + + return 0; +} + +static void c_can_setup_receive_object(struct net_device *dev, int iface, + int objno, unsigned int mask, + unsigned int id, unsigned int mcont) +{ + struct c_can_priv *priv = netdev_priv(dev); + + priv->write_reg(priv, &priv->regs->ifregs[iface].mask1, + IFX_WRITE_LOW_16BIT(mask)); + priv->write_reg(priv, &priv->regs->ifregs[iface].mask2, + IFX_WRITE_HIGH_16BIT(mask)); + + priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, + IFX_WRITE_LOW_16BIT(id)); + priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, + (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id))); + + priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont); + c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); + + netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, + c_can_read_reg32(priv, &priv->regs->msgval1)); +} + +static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno) +{ + struct c_can_priv *priv = netdev_priv(dev); + + priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0); + priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0); + priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0); + + c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL); + + netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, + c_can_read_reg32(priv, &priv->regs->msgval1)); +} + +static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno) +{ + int val = c_can_read_reg32(priv, &priv->regs->txrqst1); + + /* + * as transmission request register's bit n-1 corresponds to + * message object n, we need to handle the same properly. 
+ */ + if (val & (1 << (objno - 1))) + return 1; + + return 0; +} + +static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + u32 msg_obj_no; + struct c_can_priv *priv = netdev_priv(dev); + struct can_frame *frame = (struct can_frame *)skb->data; + + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; + + msg_obj_no = get_tx_next_msg_obj(priv); + + /* prepare message object for transmission */ + c_can_write_msg_object(dev, 0, frame, msg_obj_no); + can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); + + /* + * we have to stop the queue in case of a wrap around or + * if the next TX message object is still in use + */ + priv->tx_next++; + if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) || + (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0) + netif_stop_queue(dev); + + return NETDEV_TX_OK; +} + +static int c_can_set_bittiming(struct net_device *dev) +{ + unsigned int reg_btr, reg_brpe, ctrl_save; + u8 brp, brpe, sjw, tseg1, tseg2; + u32 ten_bit_brp; + struct c_can_priv *priv = netdev_priv(dev); + const struct can_bittiming *bt = &priv->can.bittiming; + + /* c_can provides a 6-bit brp and 4-bit brpe fields */ + ten_bit_brp = bt->brp - 1; + brp = ten_bit_brp & BTR_BRP_MASK; + brpe = ten_bit_brp >> 6; + + sjw = bt->sjw - 1; + tseg1 = bt->prop_seg + bt->phase_seg1 - 1; + tseg2 = bt->phase_seg2 - 1; + reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) | + (tseg2 << BTR_TSEG2_SHIFT); + reg_brpe = brpe & BRP_EXT_BRPE_MASK; + + netdev_info(dev, + "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); + + ctrl_save = priv->read_reg(priv, &priv->regs->control); + priv->write_reg(priv, &priv->regs->control, + ctrl_save | CONTROL_CCE | CONTROL_INIT); + priv->write_reg(priv, &priv->regs->btr, reg_btr); + priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe); + priv->write_reg(priv, &priv->regs->control, ctrl_save); + + return 0; +} + +/* + * Configure C_CAN message objects for Tx and Rx purposes: + * C_CAN provides a total of 32 message objects that can be configured + * either for Tx or Rx purposes. Here the first 16 message objects are used as + * a reception FIFO. The end of reception FIFO is signified by the EoB bit + * being SET. The remaining 16 message objects are kept aside for Tx purposes. + * See user guide document for further details on configuring message + * objects. 
+ */ +static void c_can_configure_msg_objects(struct net_device *dev) +{ + int i; + + /* first invalidate all message objects */ + for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++) + c_can_inval_msg_object(dev, 0, i); + + /* setup receive message objects */ + for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) + c_can_setup_receive_object(dev, 0, i, 0, 0, + (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB); + + c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0, + IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK); +} + +/* + * Configure C_CAN chip: + * - enable/disable auto-retransmission + * - set operating mode + * - configure message objects + */ +static void c_can_chip_config(struct net_device *dev) +{ + struct c_can_priv *priv = netdev_priv(dev); + + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + /* disable automatic retransmission */ + priv->write_reg(priv, &priv->regs->control, + CONTROL_DISABLE_AR); + else + /* enable automatic retransmission */ + priv->write_reg(priv, &priv->regs->control, + CONTROL_ENABLE_AR); + + if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY & + CAN_CTRLMODE_LOOPBACK)) { + /* loopback + silent mode : useful for hot self-test */ + priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | + CONTROL_SIE | CONTROL_IE | CONTROL_TEST); + priv->write_reg(priv, &priv->regs->test, + TEST_LBACK | TEST_SILENT); + } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { + /* loopback mode : useful for self-test function */ + priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | + CONTROL_SIE | CONTROL_IE | CONTROL_TEST); + priv->write_reg(priv, &priv->regs->test, TEST_LBACK); + } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { + /* silent mode : bus-monitoring mode */ + priv->write_reg(priv, &priv->regs->control, CONTROL_EIE | + CONTROL_SIE | CONTROL_IE | CONTROL_TEST); + priv->write_reg(priv, &priv->regs->test, TEST_SILENT); + } else + /* normal mode*/ + priv->write_reg(priv, &priv->regs->control, + CONTROL_EIE | CONTROL_SIE | CONTROL_IE); + + /* configure message objects */ + c_can_configure_msg_objects(dev); + + /* set a `lec` value so that we can check for updates later */ + priv->write_reg(priv, &priv->regs->status, LEC_UNUSED); + + /* set bittiming params */ + c_can_set_bittiming(dev); +} + +static void c_can_start(struct net_device *dev) +{ + struct c_can_priv *priv = netdev_priv(dev); + + /* enable status change, error and module interrupts */ + c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); + + /* basic c_can configuration */ + c_can_chip_config(dev); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + /* reset tx helper pointers */ + priv->tx_next = priv->tx_echo = 0; +} + +static void c_can_stop(struct net_device *dev) +{ + struct c_can_priv *priv = netdev_priv(dev); + + /* disable all interrupts */ + c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); + + /* set the state as STOPPED */ + priv->can.state = CAN_STATE_STOPPED; +} + +static int c_can_set_mode(struct net_device *dev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + c_can_start(dev); + netif_wake_queue(dev); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int c_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + unsigned int reg_err_counter; + struct c_can_priv *priv = netdev_priv(dev); + + reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt); + bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> + ERR_CNT_REC_SHIFT; + bec->txerr = 
reg_err_counter & ERR_CNT_TEC_MASK; + + return 0; +} + +/* + * theory of operation: + * + * priv->tx_echo holds the number of the oldest can_frame put for + * transmission into the hardware, but not yet ACKed by the CAN tx + * complete IRQ. + * + * We iterate from priv->tx_echo to priv->tx_next and check if the + * packet has been transmitted, echo it back to the CAN framework. + * If we discover a not yet transmitted package, stop looking for more. + */ +static void c_can_do_tx(struct net_device *dev) +{ + u32 val; + u32 msg_obj_no; + struct c_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + + for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { + msg_obj_no = get_tx_echo_msg_obj(priv); + c_can_inval_msg_object(dev, 0, msg_obj_no); + val = c_can_read_reg32(priv, &priv->regs->txrqst1); + if (!(val & (1 << msg_obj_no))) { + can_get_echo_skb(dev, + msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); + stats->tx_bytes += priv->read_reg(priv, + &priv->regs->ifregs[0].msg_cntrl) + & IF_MCONT_DLC_MASK; + stats->tx_packets++; + } + } + + /* restart queue if wrap-up or if queue stalled on last pkt */ + if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) || + ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0)) + netif_wake_queue(dev); +} + +/* + * theory of operation: + * + * c_can core saves a received CAN message into the first free message + * object it finds free (starting with the lowest). Bits NEWDAT and + * INTPND are set for this message object indicating that a new message + * has arrived. To work-around this issue, we keep two groups of message + * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. + * + * To ensure in-order frame reception we use the following + * approach while re-activating a message object to receive further + * frames: + * - if the current message object number is lower than + * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing + * the INTPND bit. + * - if the current message object number is equal to + * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower + * receive message objects. + * - if the current message object number is greater than + * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of + * only this message object. + */ +static int c_can_do_rx_poll(struct net_device *dev, int quota) +{ + u32 num_rx_pkts = 0; + unsigned int msg_obj, msg_ctrl_save; + struct c_can_priv *priv = netdev_priv(dev); + u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1); + + for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST; + msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0; + val = c_can_read_reg32(priv, &priv->regs->intpnd1), + msg_obj++) { + /* + * as interrupt pending register's bit n-1 corresponds to + * message object n, we need to handle the same properly. 
+ */ + if (val & (1 << (msg_obj - 1))) { + c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL & + ~IF_COMM_TXRQST); + msg_ctrl_save = priv->read_reg(priv, + &priv->regs->ifregs[0].msg_cntrl); + + if (msg_ctrl_save & IF_MCONT_EOB) + return num_rx_pkts; + + if (msg_ctrl_save & IF_MCONT_MSGLST) { + c_can_handle_lost_msg_obj(dev, 0, msg_obj); + num_rx_pkts++; + quota--; + continue; + } + + if (!(msg_ctrl_save & IF_MCONT_NEWDAT)) + continue; + + /* read the data from the message object */ + c_can_read_msg_object(dev, 0, msg_ctrl_save); + + if (msg_obj < C_CAN_MSG_RX_LOW_LAST) + c_can_mark_rx_msg_obj(dev, 0, + msg_ctrl_save, msg_obj); + else if (msg_obj > C_CAN_MSG_RX_LOW_LAST) + /* activate this msg obj */ + c_can_activate_rx_msg_obj(dev, 0, + msg_ctrl_save, msg_obj); + else if (msg_obj == C_CAN_MSG_RX_LOW_LAST) + /* activate all lower message objects */ + c_can_activate_all_lower_rx_msg_obj(dev, + 0, msg_ctrl_save); + + num_rx_pkts++; + quota--; + } + } + + return num_rx_pkts; +} + +static inline int c_can_has_and_handle_berr(struct c_can_priv *priv) +{ + return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && + (priv->current_status & LEC_UNUSED); +} + +static int c_can_handle_state_change(struct net_device *dev, + enum c_can_bus_error_types error_type) +{ + unsigned int reg_err_counter; + unsigned int rx_err_passive; + struct c_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + struct can_berr_counter bec; + + /* propogate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + c_can_get_berr_counter(dev, &bec); + reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt); + rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> + ERR_CNT_RP_SHIFT; + + switch (error_type) { + case C_CAN_ERROR_WARNING: + /* error warning state */ + priv->can.can_stats.error_warning++; + priv->can.state = CAN_STATE_ERROR_WARNING; + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? + CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + + break; + case C_CAN_ERROR_PASSIVE: + /* error passive state */ + priv->can.can_stats.error_passive++; + priv->can.state = CAN_STATE_ERROR_PASSIVE; + cf->can_id |= CAN_ERR_CRTL; + if (rx_err_passive) + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + if (bec.txerr > 127) + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case C_CAN_BUS_OFF: + /* bus-off state */ + priv->can.state = CAN_STATE_BUS_OFF; + cf->can_id |= CAN_ERR_BUSOFF; + /* + * disable all interrupts in bus-off mode to ensure that + * the CPU is not hogged down + */ + c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); + can_bus_off(dev); + break; + default: + break; + } + + netif_receive_skb(skb); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + return 1; +} + +static int c_can_handle_bus_err(struct net_device *dev, + enum c_can_lec_type lec_type) +{ + struct c_can_priv *priv = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + struct can_frame *cf; + struct sk_buff *skb; + + /* + * early exit if no lec update or no error. + * no lec update means that no CAN bus event has been detected + * since CPU wrote 0x7 value to status reg. 
+ */ + if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR) + return 0; + + /* propogate the error condition to the CAN stack */ + skb = alloc_can_err_skb(dev, &cf); + if (unlikely(!skb)) + return 0; + + /* + * check for 'last error code' which tells us the + * type of the last error to occur on the CAN bus + */ + + /* common for all type of bus errors */ + priv->can.can_stats.bus_error++; + stats->rx_errors++; + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + cf->data[2] |= CAN_ERR_PROT_UNSPEC; + + switch (lec_type) { + case LEC_STUFF_ERROR: + netdev_dbg(dev, "stuff error\n"); + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + case LEC_FORM_ERROR: + netdev_dbg(dev, "form error\n"); + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case LEC_ACK_ERROR: + netdev_dbg(dev, "ack error\n"); + cf->data[2] |= (CAN_ERR_PROT_LOC_ACK | + CAN_ERR_PROT_LOC_ACK_DEL); + break; + case LEC_BIT1_ERROR: + netdev_dbg(dev, "bit1 error\n"); + cf->data[2] |= CAN_ERR_PROT_BIT1; + break; + case LEC_BIT0_ERROR: + netdev_dbg(dev, "bit0 error\n"); + cf->data[2] |= CAN_ERR_PROT_BIT0; + break; + case LEC_CRC_ERROR: + netdev_dbg(dev, "CRC error\n"); + cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ | + CAN_ERR_PROT_LOC_CRC_DEL); + break; + default: + break; + } + + /* set a `lec` value so that we can check for updates later */ + priv->write_reg(priv, &priv->regs->status, LEC_UNUSED); + + netif_receive_skb(skb); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + + return 1; +} + +static int c_can_poll(struct napi_struct *napi, int quota) +{ + u16 irqstatus; + int lec_type = 0; + int work_done = 0; + struct net_device *dev = napi->dev; + struct c_can_priv *priv = netdev_priv(dev); + + irqstatus = priv->read_reg(priv, &priv->regs->interrupt); + if (!irqstatus) + goto end; + + /* status events have the highest priority */ + if (irqstatus == STATUS_INTERRUPT) { + priv->current_status = priv->read_reg(priv, + &priv->regs->status); + + /* handle Tx/Rx events */ + if (priv->current_status & STATUS_TXOK) + priv->write_reg(priv, &priv->regs->status, + priv->current_status & ~STATUS_TXOK); + + if (priv->current_status & STATUS_RXOK) + priv->write_reg(priv, &priv->regs->status, + priv->current_status & ~STATUS_RXOK); + + /* handle state changes */ + if ((priv->current_status & STATUS_EWARN) && + (!(priv->last_status & STATUS_EWARN))) { + netdev_dbg(dev, "entered error warning state\n"); + work_done += c_can_handle_state_change(dev, + C_CAN_ERROR_WARNING); + } + if ((priv->current_status & STATUS_EPASS) && + (!(priv->last_status & STATUS_EPASS))) { + netdev_dbg(dev, "entered error passive state\n"); + work_done += c_can_handle_state_change(dev, + C_CAN_ERROR_PASSIVE); + } + if ((priv->current_status & STATUS_BOFF) && + (!(priv->last_status & STATUS_BOFF))) { + netdev_dbg(dev, "entered bus off state\n"); + work_done += c_can_handle_state_change(dev, + C_CAN_BUS_OFF); + } + + /* handle bus recovery events */ + if ((!(priv->current_status & STATUS_BOFF)) && + (priv->last_status & STATUS_BOFF)) { + netdev_dbg(dev, "left bus off state\n"); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + if ((!(priv->current_status & STATUS_EPASS)) && + (priv->last_status & STATUS_EPASS)) { + netdev_dbg(dev, "left error passive state\n"); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + + priv->last_status = priv->current_status; + + /* handle lec errors on the bus */ + lec_type = c_can_has_and_handle_berr(priv); + if (lec_type) + work_done += c_can_handle_bus_err(dev, lec_type); + } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) && + (irqstatus <= 
C_CAN_MSG_OBJ_RX_LAST)) { + /* handle events corresponding to receive message objects */ + work_done += c_can_do_rx_poll(dev, (quota - work_done)); + } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) && + (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) { + /* handle events corresponding to transmit message objects */ + c_can_do_tx(dev); + } + +end: + if (work_done < quota) { + napi_complete(napi); + /* enable all IRQs */ + c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); + } + + return work_done; +} + +static irqreturn_t c_can_isr(int irq, void *dev_id) +{ + u16 irqstatus; + struct net_device *dev = (struct net_device *)dev_id; + struct c_can_priv *priv = netdev_priv(dev); + + irqstatus = priv->read_reg(priv, &priv->regs->interrupt); + if (!irqstatus) + return IRQ_NONE; + + /* disable all interrupts and schedule the NAPI */ + c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); + napi_schedule(&priv->napi); + + return IRQ_HANDLED; +} + +static int c_can_open(struct net_device *dev) +{ + int err; + struct c_can_priv *priv = netdev_priv(dev); + + /* open the can device */ + err = open_candev(dev); + if (err) { + netdev_err(dev, "failed to open can device\n"); + return err; + } + + /* register interrupt handler */ + err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name, + dev); + if (err < 0) { + netdev_err(dev, "failed to request interrupt\n"); + goto exit_irq_fail; + } + + /* start the c_can controller */ + c_can_start(dev); + + napi_enable(&priv->napi); + netif_start_queue(dev); + + return 0; + +exit_irq_fail: + close_candev(dev); + return err; +} + +static int c_can_close(struct net_device *dev) +{ + struct c_can_priv *priv = netdev_priv(dev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + c_can_stop(dev); + free_irq(dev->irq, dev); + close_candev(dev); + + return 0; +} + +struct net_device *alloc_c_can_dev(void) +{ + struct net_device *dev; + struct c_can_priv *priv; + + dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM); + if (!dev) + return NULL; + + priv = netdev_priv(dev); + netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); + + priv->dev = dev; + priv->can.bittiming_const = &c_can_bittiming_const; + priv->can.do_set_mode = c_can_set_mode; + priv->can.do_get_berr_counter = c_can_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT | + CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_BERR_REPORTING; + + return dev; +} +EXPORT_SYMBOL_GPL(alloc_c_can_dev); + +void free_c_can_dev(struct net_device *dev) +{ + free_candev(dev); +} +EXPORT_SYMBOL_GPL(free_c_can_dev); + +static const struct net_device_ops c_can_netdev_ops = { + .ndo_open = c_can_open, + .ndo_stop = c_can_close, + .ndo_start_xmit = c_can_start_xmit, +}; + +int register_c_can_dev(struct net_device *dev) +{ + dev->flags |= IFF_ECHO; /* we support local echo */ + dev->netdev_ops = &c_can_netdev_ops; + + return register_candev(dev); +} +EXPORT_SYMBOL_GPL(register_c_can_dev); + +void unregister_c_can_dev(struct net_device *dev) +{ + struct c_can_priv *priv = netdev_priv(dev); + + /* disable all interrupts */ + c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); + + unregister_candev(dev); +} +EXPORT_SYMBOL_GPL(unregister_c_can_dev); + +MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller"); diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h new file mode 100644 index 000000000000..9b7fbef3d09a --- /dev/null +++ 
b/drivers/net/can/c_can/c_can.h @@ -0,0 +1,86 @@ +/* + * CAN bus driver for Bosch C_CAN controller + * + * Copyright (C) 2010 ST Microelectronics + * Bhupesh Sharma <bhupesh.sharma@st.com> + * + * Borrowed heavily from the C_CAN driver originally written by: + * Copyright (C) 2007 + * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de> + * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch> + * + * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B. + * Bosch C_CAN user manual can be obtained from: + * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/ + * users_manual_c_can.pdf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef C_CAN_H +#define C_CAN_H + +/* c_can IF registers */ +struct c_can_if_regs { + u16 com_req; + u16 com_mask; + u16 mask1; + u16 mask2; + u16 arb1; + u16 arb2; + u16 msg_cntrl; + u16 data[4]; + u16 _reserved[13]; +}; + +/* c_can hardware registers */ +struct c_can_regs { + u16 control; + u16 status; + u16 err_cnt; + u16 btr; + u16 interrupt; + u16 test; + u16 brp_ext; + u16 _reserved1; + struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */ + u16 _reserved2[8]; + u16 txrqst1; + u16 txrqst2; + u16 _reserved3[6]; + u16 newdat1; + u16 newdat2; + u16 _reserved4[6]; + u16 intpnd1; + u16 intpnd2; + u16 _reserved5[6]; + u16 msgval1; + u16 msgval2; + u16 _reserved6[6]; +}; + +/* c_can private data structure */ +struct c_can_priv { + struct can_priv can; /* must be the first member */ + struct napi_struct napi; + struct net_device *dev; + int tx_object; + int current_status; + int last_status; + u16 (*read_reg) (struct c_can_priv *priv, void *reg); + void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val); + struct c_can_regs __iomem *regs; + unsigned long irq_flags; /* for request_irq() */ + unsigned int tx_next; + unsigned int tx_echo; + void *priv; /* for board-specific data */ +}; + +struct net_device *alloc_c_can_dev(void); +void free_c_can_dev(struct net_device *dev); +int register_c_can_dev(struct net_device *dev); +void unregister_c_can_dev(struct net_device *dev); + +#endif /* C_CAN_H */ diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c new file mode 100644 index 000000000000..e629b961ae2d --- /dev/null +++ b/drivers/net/can/c_can/c_can_platform.c @@ -0,0 +1,215 @@ +/* + * Platform CAN bus driver for Bosch C_CAN controller + * + * Copyright (C) 2010 ST Microelectronics + * Bhupesh Sharma <bhupesh.sharma@st.com> + * + * Borrowed heavily from the C_CAN driver originally written by: + * Copyright (C) 2007 + * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de> + * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch> + * + * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B. + * Bosch C_CAN user manual can be obtained from: + * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/ + * users_manual_c_can.pdf + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/kernel.h> +#include <linux/version.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/if_arp.h> +#include <linux/if_ether.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/clk.h> + +#include <linux/can/dev.h> + +#include "c_can.h" + +/* + * 16-bit c_can registers can be arranged differently in the memory + * architecture of different implementations. For example: 16-bit + * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. + * Handle the same by providing a common read/write interface. + */ +static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv, + void *reg) +{ + return readw(reg); +} + +static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv, + void *reg, u16 val) +{ + writew(val, reg); +} + +static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv, + void *reg) +{ + return readw(reg + (long)reg - (long)priv->regs); +} + +static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv, + void *reg, u16 val) +{ + writew(val, reg + (long)reg - (long)priv->regs); +} + +static int __devinit c_can_plat_probe(struct platform_device *pdev) +{ + int ret; + void __iomem *addr; + struct net_device *dev; + struct c_can_priv *priv; + struct resource *mem, *irq; +#ifdef CONFIG_HAVE_CLK + struct clk *clk; + + /* get the appropriate clk */ + clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "no clock defined\n"); + ret = -ENODEV; + goto exit; + } +#endif + + /* get the platform data */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!mem || (irq <= 0)) { + ret = -ENODEV; + goto exit_free_clk; + } + + if (!request_mem_region(mem->start, resource_size(mem), + KBUILD_MODNAME)) { + dev_err(&pdev->dev, "resource unavailable\n"); + ret = -ENODEV; + goto exit_free_clk; + } + + addr = ioremap(mem->start, resource_size(mem)); + if (!addr) { + dev_err(&pdev->dev, "failed to map can port\n"); + ret = -ENOMEM; + goto exit_release_mem; + } + + /* allocate the c_can device */ + dev = alloc_c_can_dev(); + if (!dev) { + ret = -ENOMEM; + goto exit_iounmap; + } + + priv = netdev_priv(dev); + + dev->irq = irq->start; + priv->regs = addr; +#ifdef CONFIG_HAVE_CLK + priv->can.clock.freq = clk_get_rate(clk); + priv->priv = clk; +#endif + + switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { + case IORESOURCE_MEM_32BIT: + priv->read_reg = c_can_plat_read_reg_aligned_to_32bit; + priv->write_reg = c_can_plat_write_reg_aligned_to_32bit; + break; + case IORESOURCE_MEM_16BIT: + default: + priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; + priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; + break; + } + + platform_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + ret = register_c_can_dev(dev); + if (ret) { + dev_err(&pdev->dev, "registering %s failed (err=%d)\n", + KBUILD_MODNAME, ret); + goto exit_free_device; + } + + dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", + KBUILD_MODNAME, priv->regs, dev->irq); + return 0; + +exit_free_device: + platform_set_drvdata(pdev, NULL); + free_c_can_dev(dev); +exit_iounmap: + iounmap(addr); +exit_release_mem: + release_mem_region(mem->start, resource_size(mem)); +exit_free_clk: +#ifdef CONFIG_HAVE_CLK + clk_put(clk); +exit: +#endif + dev_err(&pdev->dev, "probe failed\n"); + + return ret; +} + +static 
int __devexit c_can_plat_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(dev); + struct resource *mem; + + unregister_c_can_dev(dev); + platform_set_drvdata(pdev, NULL); + + free_c_can_dev(dev); + iounmap(priv->regs); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(mem->start, resource_size(mem)); + +#ifdef CONFIG_HAVE_CLK + clk_put(priv->priv); +#endif + + return 0; +} + +static struct platform_driver c_can_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = c_can_plat_probe, + .remove = __devexit_p(c_can_plat_remove), +}; + +static int __init c_can_plat_init(void) +{ + return platform_driver_register(&c_can_plat_driver); +} +module_init(c_can_plat_init); + +static void __exit c_can_plat_exit(void) +{ + platform_driver_unregister(&c_can_plat_driver); +} +module_exit(c_can_plat_exit); + +MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller"); diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 05a52754f486..dc53c831ea95 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -659,7 +659,7 @@ failed: static void unlink_all_urbs(struct esd_usb2 *dev) { struct esd_usb2_net_priv *priv; - int i; + int i, j; usb_kill_anchored_urbs(&dev->rx_submitted); for (i = 0; i < dev->net_count; i++) { @@ -668,8 +668,8 @@ static void unlink_all_urbs(struct esd_usb2 *dev) usb_kill_anchored_urbs(&priv->tx_submitted); atomic_set(&priv->active_tx_jobs, 0); - for (i = 0; i < MAX_TX_URBS; i++) - priv->tx_contexts[i].echo_index = MAX_TX_URBS; + for (j = 0; j < MAX_TX_URBS; j++) + priv->tx_contexts[j].echo_index = MAX_TX_URBS; } } } diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 302be4aa69d6..8cca60e43444 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -65,7 +65,14 @@ static LIST_HEAD(cnic_udev_list); static DEFINE_RWLOCK(cnic_dev_lock); static DEFINE_MUTEX(cnic_lock); -static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; +static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; + +/* helper function, assuming cnic_lock is held */ +static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type) +{ + return rcu_dereference_protected(cnic_ulp_tbl[type], + lockdep_is_held(&cnic_lock)); +} static int cnic_service_bnx2(void *, void *); static int cnic_service_bnx2x(void *, void *); @@ -435,7 +442,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) return -EINVAL; } mutex_lock(&cnic_lock); - if (cnic_ulp_tbl[ulp_type]) { + if (cnic_ulp_tbl_prot(ulp_type)) { pr_err("%s: Type %d has already been registered\n", __func__, ulp_type); mutex_unlock(&cnic_lock); @@ -478,7 +485,7 @@ int cnic_unregister_driver(int ulp_type) return -EINVAL; } mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl[ulp_type]; + ulp_ops = cnic_ulp_tbl_prot(ulp_type); if (!ulp_ops) { pr_err("%s: Type %d has not been registered\n", __func__, ulp_type); @@ -529,7 +536,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, return -EINVAL; } mutex_lock(&cnic_lock); - if (cnic_ulp_tbl[ulp_type] == NULL) { + if (cnic_ulp_tbl_prot(ulp_type) == NULL) { pr_err("%s: Driver with type %d has not been registered\n", __func__, ulp_type); mutex_unlock(&cnic_lock); @@ -544,7 +551,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type, clear_bit(ULP_F_START, 
&cp->ulp_flags[ulp_type]); cp->ulp_handle[ulp_type] = ulp_ctx; - ulp_ops = cnic_ulp_tbl[ulp_type]; + ulp_ops = cnic_ulp_tbl_prot(ulp_type); rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops); cnic_hold(dev); @@ -2970,7 +2977,8 @@ static void cnic_ulp_stop(struct cnic_dev *dev) struct cnic_ulp_ops *ulp_ops; mutex_lock(&cnic_lock); - ulp_ops = cp->ulp_ops[if_type]; + ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], + lockdep_is_held(&cnic_lock)); if (!ulp_ops) { mutex_unlock(&cnic_lock); continue; @@ -2994,7 +3002,8 @@ static void cnic_ulp_start(struct cnic_dev *dev) struct cnic_ulp_ops *ulp_ops; mutex_lock(&cnic_lock); - ulp_ops = cp->ulp_ops[if_type]; + ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], + lockdep_is_held(&cnic_lock)); if (!ulp_ops || !ulp_ops->cnic_start) { mutex_unlock(&cnic_lock); continue; @@ -3058,7 +3067,7 @@ static void cnic_ulp_init(struct cnic_dev *dev) struct cnic_ulp_ops *ulp_ops; mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl[i]; + ulp_ops = cnic_ulp_tbl_prot(i); if (!ulp_ops || !ulp_ops->cnic_init) { mutex_unlock(&cnic_lock); continue; @@ -3082,7 +3091,7 @@ static void cnic_ulp_exit(struct cnic_dev *dev) struct cnic_ulp_ops *ulp_ops; mutex_lock(&cnic_lock); - ulp_ops = cnic_ulp_tbl[i]; + ulp_ops = cnic_ulp_tbl_prot(i); if (!ulp_ops || !ulp_ops->cnic_exit) { mutex_unlock(&cnic_lock); continue; @@ -3398,17 +3407,14 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, struct dst_entry **dst) { #if defined(CONFIG_INET) - struct flowi fl; - int err; struct rtable *rt; - memset(&fl, 0, sizeof(fl)); - fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr; - - err = ip_route_output_key(&init_net, &rt, &fl); - if (!err) + rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); + if (!IS_ERR(rt)) { *dst = &rt->dst; - return err; + return 0; + } + return PTR_ERR(rt); #else return -ENETUNREACH; #endif @@ -3418,14 +3424,14 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, struct dst_entry **dst) { #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) - struct flowi fl; + struct flowi6 fl6; - memset(&fl, 0, sizeof(fl)); - ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr); - if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL) - fl.oif = dst_addr->sin6_scope_id; + memset(&fl6, 0, sizeof(fl6)); + ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr); + if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = dst_addr->sin6_scope_id; - *dst = ip6_route_output(&init_net, NULL, &fl); + *dst = ip6_route_output(&init_net, NULL, &fl6); if (*dst) return 0; #endif @@ -4187,6 +4193,14 @@ static void cnic_enable_bnx2_int(struct cnic_dev *dev) BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); } +static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev) +{ + u32 max_conn; + + max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN); + dev->max_iscsi_conn = max_conn; +} + static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; @@ -4511,6 +4525,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) return err; } + cnic_get_bnx2_iscsi_info(dev); + return 0; } @@ -4722,129 +4738,6 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, cp->rx_cons = *cp->rx_cons_ptr; } -static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr, - u32 lower_addr) -{ - u32 val; - u8 mac[6]; - - val = CNIC_RD(dev, upper_addr); - - mac[0] = (u8) (val >> 8); - mac[1] = (u8) val; - - val = CNIC_RD(dev, lower_addr); - - mac[2] = (u8) (val >> 24); - mac[3] 
= (u8) (val >> 16); - mac[4] = (u8) (val >> 8); - mac[5] = (u8) val; - - if (is_valid_ether_addr(mac)) { - memcpy(dev->mac_addr, mac, 6); - return 0; - } else { - return -EINVAL; - } -} - -static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 base, base2, addr, addr1, val; - int port = CNIC_PORT(cp); - - dev->max_iscsi_conn = 0; - base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR); - if (base == 0) - return; - - base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 : - MISC_REG_GENERIC_CR_0)); - addr = BNX2X_SHMEM_ADDR(base, - dev_info.port_hw_config[port].iscsi_mac_upper); - - addr1 = BNX2X_SHMEM_ADDR(base, - dev_info.port_hw_config[port].iscsi_mac_lower); - - cnic_read_bnx2x_iscsi_mac(dev, addr, addr1); - - addr = BNX2X_SHMEM_ADDR(base, validity_map[port]); - val = CNIC_RD(dev, addr); - - if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) { - u16 val16; - - addr = BNX2X_SHMEM_ADDR(base, - drv_lic_key[port].max_iscsi_init_conn); - val16 = CNIC_RD16(dev, addr); - - if (val16) - val16 ^= 0x1e1e; - dev->max_iscsi_conn = val16; - } - - if (BNX2X_CHIP_IS_E2(cp->chip_id)) - dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; - - if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) { - int func = CNIC_FUNC(cp); - u32 mf_cfg_addr; - - if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr)) - mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2, - mf_cfg_addr)); - else - mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET; - - if (BNX2X_CHIP_IS_E2(cp->chip_id)) { - /* Must determine if the MF is SD vs SI mode */ - addr = BNX2X_SHMEM_ADDR(base, - dev_info.shared_feature_config.config); - val = CNIC_RD(dev, addr); - if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) == - SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) { - int rc; - - /* MULTI_FUNCTION_SI mode */ - addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr, - func_ext_config[func].func_cfg); - val = CNIC_RD(dev, addr); - if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD)) - dev->max_iscsi_conn = 0; - - if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)) - dev->max_fcoe_conn = 0; - - addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr, - func_ext_config[func]. - iscsi_mac_addr_upper); - addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr, - func_ext_config[func]. 
- iscsi_mac_addr_lower); - rc = cnic_read_bnx2x_iscsi_mac(dev, addr, - addr1); - if (rc && func > 1) - dev->max_iscsi_conn = 0; - - return; - } - } - - addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr, - func_mf_config[func].e1hov_tag); - - val = CNIC_RD(dev, addr); - val &= FUNC_MF_CFG_E1HOV_TAG_MASK; - if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { - dev->max_fcoe_conn = 0; - dev->max_iscsi_conn = 0; - } - } - if (!is_valid_ether_addr(dev->mac_addr)) - dev->max_iscsi_conn = 0; -} - static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) { struct cnic_local *cp = dev->cnic_priv; @@ -4926,8 +4819,6 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev) cnic_init_bnx2x_kcq(dev); - cnic_get_bnx2x_iscsi_info(dev); - /* Only 1 EQ */ CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); CNIC_WR(dev, BAR_CSTRORM_INTMEM + @@ -5281,15 +5172,11 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) dev_hold(dev); pci_dev_get(pdev); - if (pdev->device == PCI_DEVICE_ID_NX2_5709 || - pdev->device == PCI_DEVICE_ID_NX2_5709S) { - u8 rev; - - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); - if (rev < 0x10) { - pci_dev_put(pdev); - goto cnic_err; - } + if ((pdev->device == PCI_DEVICE_ID_NX2_5709 || + pdev->device == PCI_DEVICE_ID_NX2_5709S) && + (pdev->revision < 0x10)) { + pci_dev_put(pdev); + goto cnic_err; } pci_dev_put(pdev); @@ -5360,6 +5247,14 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) cdev->pcidev = pdev; cp->chip_id = ethdev->chip_id; + if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) + cdev->max_iscsi_conn = ethdev->max_iscsi_conn; + if (BNX2X_CHIP_IS_E2(cp->chip_id) && + !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) + cdev->max_fcoe_conn = ethdev->max_fcoe_conn; + + memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6); + cp->cnic_ops = &cnic_bnx2x_ops; cp->start_hw = cnic_start_bnx2x_hw; cp->stop_hw = cnic_stop_bnx2x_hw; diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h index b328f6c924c3..4456260c653c 100644 --- a/drivers/net/cnic.h +++ b/drivers/net/cnic.h @@ -220,7 +220,7 @@ struct cnic_local { #define ULP_F_INIT 0 #define ULP_F_START 1 #define ULP_F_CALL_PENDING 2 - struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; + struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE]; unsigned long cnic_local_flags; #define CNIC_LCL_FL_KWQ_INIT 0x0 diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h index 9f44e0ffe003..e01b49ee3591 100644 --- a/drivers/net/cnic_if.h +++ b/drivers/net/cnic_if.h @@ -12,8 +12,8 @@ #ifndef CNIC_IF_H #define CNIC_IF_H -#define CNIC_MODULE_VERSION "2.2.12" -#define CNIC_MODULE_RELDATE "Jan 03, 2011" +#define CNIC_MODULE_VERSION "2.2.13" +#define CNIC_MODULE_RELDATE "Jan 31, 2011" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 @@ -159,6 +159,9 @@ struct cnic_eth_dev { u32 drv_state; #define CNIC_DRV_STATE_REGD 0x00000001 #define CNIC_DRV_STATE_USING_MSIX 0x00000002 +#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004 +#define CNIC_DRV_STATE_NO_ISCSI 0x00000008 +#define CNIC_DRV_STATE_NO_FCOE 0x00000010 u32 chip_id; u32 max_kwqe_pending; struct pci_dev *pdev; @@ -176,6 +179,7 @@ struct cnic_eth_dev { u32 fcoe_init_cid; u16 iscsi_l2_client_id; u16 iscsi_l2_cid; + u8 iscsi_mac[ETH_ALEN]; int num_irq; struct cnic_irq irq_arr[MAX_CNIC_VEC]; diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c index d325e01a53e0..537a4b2e2020 100644 --- a/drivers/net/cs89x0.c +++ b/drivers/net/cs89x0.c @@ -95,6 +95,9 @@ Dmitry Pervushin : dpervushin@ru.mvista.com : PNX010X platform support + Domenico Andreoli : cavokz@gmail.com + : QQ2440 platform support + */ /* Always include 
'config.h' first in case the user wants to turn on @@ -176,6 +179,10 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0}; #elif defined(CONFIG_ARCH_IXDP2X01) static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0}; static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0}; +#elif defined(CONFIG_MACH_QQ2440) +#include <mach/qq2440.h> +static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 }; +static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 }; #elif defined(CONFIG_MACH_MX31ADS) #include <mach/board-mx31ads.h> static unsigned int netcard_portlist[] __used __initdata = { @@ -521,6 +528,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular) #endif lp->force = g_cs89x0_media__force; #endif + +#if defined(CONFIG_MACH_QQ2440) + lp->force |= FORCE_RJ45 | FORCE_FULL; +#endif } /* Grab the region so we can find another board if autoIRQ fails. */ @@ -943,10 +954,10 @@ skip_this_frame: static void __init reset_chip(struct net_device *dev) { #if !defined(CONFIG_MACH_MX31ADS) -#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01) +#if !defined(CS89x0_NONISA_IRQ) struct net_local *lp = netdev_priv(dev); int ioaddr = dev->base_addr; -#endif +#endif /* CS89x0_NONISA_IRQ */ int reset_start_time; writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); @@ -954,7 +965,7 @@ static void __init reset_chip(struct net_device *dev) /* wait 30 ms */ msleep(30); -#if !defined(CONFIG_MACH_IXDP2351) && !defined(CONFIG_ARCH_IXDP2X01) +#if !defined(CS89x0_NONISA_IRQ) if (lp->chip_type != CS8900) { /* Hardware problem requires PNP registers to be reconfigured after a reset */ writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT); @@ -965,7 +976,7 @@ static void __init reset_chip(struct net_device *dev) outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT); outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1); } -#endif /* IXDP2x01 */ +#endif /* CS89x0_NONISA_IRQ */ /* Wait until the chip is reset */ reset_start_time = jiffies; diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index ef02aa68c926..862804f32b6e 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -186,9 +186,10 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter, dev = NULL; if (grp) dev = vlan_group_get_device(grp, vlan); - } else + } else if (netif_is_bond_slave(dev)) { while (dev->master) dev = dev->master; + } return dev; } } @@ -967,8 +968,6 @@ static int nb_callback(struct notifier_block *self, unsigned long event, cxgb_neigh_update((struct neighbour *)ctx); break; } - case (NETEVENT_PMTU_UPDATE): - break; case (NETEVENT_REDIRECT):{ struct netevent_redirect *nr = ctx; cxgb_redirect(nr->old, nr->new); diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index ec35d458102c..5352c8a23f4d 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -2471,7 +2471,6 @@ static int netevent_cb(struct notifier_block *nb, unsigned long event, case NETEVENT_NEIGH_UPDATE: check_neigh_update(data); break; - case NETEVENT_PMTU_UPDATE: case NETEVENT_REDIRECT: default: break; diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 7018bfe408a4..082d6ea69920 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -1730,7 +1730,7 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) 
emac_read(EMAC_TXCARRIERSENSE); emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); - ndev->stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); + ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN); emac_write(EMAC_TXUNDERRUN, stats_clear_mask); return &ndev->stats; diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 461dd6f905f7..317708113601 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -1593,10 +1593,15 @@ dm9000_probe(struct platform_device *pdev) ndev->dev_addr[i] = ior(db, i+DM9000_PAR); } - if (!is_valid_ether_addr(ndev->dev_addr)) + if (!is_valid_ether_addr(ndev->dev_addr)) { dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " "set using ifconfig\n", ndev->name); + random_ether_addr(ndev->dev_addr); + mac_src = "random"; + } + + platform_set_drvdata(pdev, ndev); ret = register_netdev(ndev); diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 13149983d07e..c516a7440bec 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -86,6 +86,7 @@ #define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ #define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ #define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_LSECCK 0x00001000 #define E1000_CTRL_EXT_PHYPDEN 0x00100000 /* Receive Descriptor bit definitions */ diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index e610e1369053..00bf595ebd67 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -364,6 +364,7 @@ struct e1000_adapter { /* structs defined in e1000_hw.h */ struct e1000_hw hw; + spinlock_t stats64_lock; struct e1000_hw_stats stats; struct e1000_phy_info phy_info; struct e1000_phy_stats phy_stats; @@ -494,7 +495,9 @@ extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter); extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter); extern void e1000e_free_rx_resources(struct e1000_adapter *adapter); extern void e1000e_free_tx_resources(struct e1000_adapter *adapter); -extern void e1000e_update_stats(struct e1000_adapter *adapter); +extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 + *stats); extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_get_hw_control(struct e1000_adapter *adapter); diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index fa08b6336cfb..07f09e96e453 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -46,15 +46,15 @@ struct e1000_stats { }; #define E1000_STAT(str, m) { \ - .stat_string = str, \ - .type = E1000_STATS, \ - .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ - .stat_offset = offsetof(struct e1000_adapter, m) } + .stat_string = str, \ + .type = E1000_STATS, \ + .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ + .stat_offset = offsetof(struct e1000_adapter, m) } #define E1000_NETDEV_STAT(str, m) { \ - .stat_string = str, \ - .type = NETDEV_STATS, \ - .sizeof_stat = sizeof(((struct net_device *)0)->m), \ - .stat_offset = offsetof(struct net_device, m) } + .stat_string = str, \ + .type = NETDEV_STATS, \ + .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, m) } static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("rx_packets", stats.gprc), @@ -65,21 
+65,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = { E1000_STAT("tx_broadcast", stats.bptc), E1000_STAT("rx_multicast", stats.mprc), E1000_STAT("tx_multicast", stats.mptc), - E1000_NETDEV_STAT("rx_errors", stats.rx_errors), - E1000_NETDEV_STAT("tx_errors", stats.tx_errors), - E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped), + E1000_NETDEV_STAT("rx_errors", rx_errors), + E1000_NETDEV_STAT("tx_errors", tx_errors), + E1000_NETDEV_STAT("tx_dropped", tx_dropped), E1000_STAT("multicast", stats.mprc), E1000_STAT("collisions", stats.colc), - E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors), - E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors), + E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), + E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), E1000_STAT("rx_crc_errors", stats.crcerrs), - E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors), + E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), E1000_STAT("rx_no_buffer_count", stats.rnbc), E1000_STAT("rx_missed_errors", stats.mpc), E1000_STAT("tx_aborted_errors", stats.ecol), E1000_STAT("tx_carrier_errors", stats.tncrs), - E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors), - E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors), + E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), + E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), E1000_STAT("tx_window_errors", stats.latecol), E1000_STAT("tx_abort_late_coll", stats.latecol), E1000_STAT("tx_deferred_ok", stats.dc), @@ -433,13 +433,11 @@ static void e1000_get_regs(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; u32 *regs_buff = p; u16 phy_data; - u8 revision_id; memset(p, 0, E1000_REGS_LEN * sizeof(u32)); - pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); - - regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device; + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; regs_buff[0] = er32(CTRL); regs_buff[1] = er32(STATUS); @@ -684,20 +682,13 @@ static int e1000_set_ringparam(struct net_device *netdev, rx_old = adapter->rx_ring; err = -ENOMEM; - tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL); if (!tx_ring) goto err_alloc_tx; - /* - * use a memcpy to save any previously configured - * items like napi structs from having to be - * reinitialized - */ - memcpy(tx_ring, tx_old, sizeof(struct e1000_ring)); - rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); + rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL); if (!rx_ring) goto err_alloc_rx; - memcpy(rx_ring, rx_old, sizeof(struct e1000_ring)); adapter->tx_ring = tx_ring; adapter->rx_ring = rx_ring; @@ -1255,7 +1246,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; - u32 stat_reg = 0; u16 phy_reg = 0; s32 ret_val = 0; @@ -1363,8 +1353,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) * Set the ILOS bit on the fiber Nic if half duplex link is * detected. 
*/ - stat_reg = er32(STATUS); - if ((stat_reg & E1000_STATUS_FD) == 0) + if ((er32(STATUS) & E1000_STATUS_FD) == 0) ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); } @@ -1677,10 +1666,13 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) } else { hw->mac.ops.check_for_link(hw); if (hw->mac.autoneg) - msleep(4000); + /* + * On some Phy/switch combinations, link establishment + * can take a few seconds more than expected. + */ + msleep(5000); - if (!(er32(STATUS) & - E1000_STATUS_LU)) + if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; } return *data; @@ -1807,8 +1799,7 @@ static void e1000_get_wol(struct net_device *netdev, return; wol->supported = WAKE_UCAST | WAKE_MCAST | - WAKE_BCAST | WAKE_MAGIC | - WAKE_PHY | WAKE_ARP; + WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; /* apply any specific unsupported masks here */ if (adapter->flags & FLAG_NO_WAKE_UCAST) { @@ -1829,19 +1820,16 @@ static void e1000_get_wol(struct net_device *netdev, wol->wolopts |= WAKE_MAGIC; if (adapter->wol & E1000_WUFC_LNKC) wol->wolopts |= WAKE_PHY; - if (adapter->wol & E1000_WUFC_ARP) - wol->wolopts |= WAKE_ARP; } -static int e1000_set_wol(struct net_device *netdev, - struct ethtool_wolinfo *wol) +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct e1000_adapter *adapter = netdev_priv(netdev); if (!(adapter->flags & FLAG_HAS_WOL) || !device_can_wakeup(&adapter->pdev->dev) || (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | - WAKE_MAGIC | WAKE_PHY | WAKE_ARP))) + WAKE_MAGIC | WAKE_PHY))) return -EOPNOTSUPP; /* these settings will always override what we currently have */ @@ -1857,8 +1845,6 @@ static int e1000_set_wol(struct net_device *netdev, adapter->wol |= E1000_WUFC_MAG; if (wol->wolopts & WAKE_PHY) adapter->wol |= E1000_WUFC_LNKC; - if (wol->wolopts & WAKE_ARP) - adapter->wol |= E1000_WUFC_ARP; device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); @@ -1972,8 +1958,15 @@ static int e1000_set_coalesce(struct net_device *netdev, static int e1000_nway_reset(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); - if (netif_running(netdev)) - e1000e_reinit_locked(adapter); + + if (!netif_running(netdev)) + return -EAGAIN; + + if (!adapter->hw.mac.autoneg) + return -EINVAL; + + e1000e_reinit_locked(adapter); + return 0; } @@ -1982,14 +1975,15 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, u64 *data) { struct e1000_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 net_stats; int i; char *p = NULL; - e1000e_update_stats(adapter); + e1000e_get_stats64(netdev, &net_stats); for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { switch (e1000_gstrings_stats[i].type) { case NETDEV_STATS: - p = (char *) netdev + + p = (char *) &net_stats + e1000_gstrings_stats[i].stat_offset; break; case E1000_STATS: @@ -2014,7 +2008,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_TEST: - memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test)); + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index bc0860a598c9..307e1ec22417 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -812,9 +812,8 @@ struct e1000_nvm_operations { struct e1000_mac_info { struct e1000_mac_operations ops; - - u8 addr[6]; - u8 perm_addr[6]; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; enum e1000_mac_type type; diff 
--git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index fb46974cfec1..ce1dbfdca112 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -140,6 +140,11 @@ #define I82579_LPI_CTRL PHY_REG(772, 20) #define I82579_LPI_CTRL_ENABLE_MASK 0x6000 +/* EMI Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ + /* Strapping Option Register - RO */ #define E1000_STRAP 0x0000C #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 @@ -302,9 +307,9 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) * the interconnect to PCIe mode. */ fwsm = er32(FWSM); - if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) { + if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) { ctrl = er32(CTRL); - ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; + ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; ew32(CTRL, ctrl); udelay(10); @@ -331,7 +336,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) goto out; /* Ungate automatic PHY configuration on non-managed 82579 */ - if ((hw->mac.type == e1000_pch2lan) && + if ((hw->mac.type == e1000_pch2lan) && !(fwsm & E1000_ICH_FWSM_FW_VALID)) { msleep(10); e1000_gate_hw_phy_config_ich8lan(hw, false); @@ -366,7 +371,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) case e1000_phy_82579: phy->ops.check_polarity = e1000_check_polarity_82577; phy->ops.force_speed_duplex = - e1000_phy_force_speed_duplex_82577; + e1000_phy_force_speed_duplex_82577; phy->ops.get_cable_length = e1000_get_cable_length_82577; phy->ops.get_info = e1000_get_phy_info_82577; phy->ops.commit = e1000e_phy_sw_reset; @@ -753,7 +758,13 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) if (rc) return rc; - if (adapter->hw.phy.type == e1000_phy_ife) { + /* + * Disable Jumbo Frame support on parts with Intel 10/100 PHY or + * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). 
+ */ + if ((adapter->hw.phy.type == e1000_phy_ife) || + ((adapter->hw.mac.type >= e1000_pch2lan) && + (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; } @@ -1723,11 +1734,25 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) /* Configure the LCD with the OEM bits in NVM */ ret_val = e1000_oem_bits_config_ich8lan(hw, true); - /* Ungate automatic PHY configuration on non-managed 82579 */ - if ((hw->mac.type == e1000_pch2lan) && - !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { - msleep(10); - e1000_gate_hw_phy_config_ich8lan(hw, false); + if (hw->mac.type == e1000_pch2lan) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + msleep(10); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + /* Set EEE LPI Update Timer to 200usec */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, + I82579_LPI_UPDATE_TIMER); + if (ret_val) + goto release; + ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, + 0x1387); +release: + hw->phy.ops.release(hw); } out: @@ -2104,7 +2129,6 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) { union ich8_hws_flash_status hsfsts; s32 ret_val = -E1000_ERR_NVM; - s32 i = 0; hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); @@ -2140,6 +2164,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); ret_val = 0; } else { + s32 i = 0; + /* * Otherwise poll for sometime so the current * cycle has a chance to end before giving up. diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index 68aa1749bf66..96921de5df2e 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -1978,15 +1978,15 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) { struct e1000_nvm_info *nvm = &hw->nvm; u32 eecd = er32(EECD); - u16 timeout = 0; u8 spi_stat_reg; if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + /* Clear SK and CS */ eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); ew32(EECD, eecd); udelay(1); - timeout = NVM_MAX_RETRY_SPI; /* * Read "Status Register" repeatedly until the LSB is cleared. 
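The post-PHY-reset hunk above programs the 82579 EEE LPI update timer through the indirect EMI address/data register pair while the PHY semaphore is held. Below is a minimal sketch of that access pattern; the helper name is hypothetical and not part of the patch — only the I82579_EMI_* macros, I82579_LPI_UPDATE_TIMER, and the phy.ops calls shown above are.

	/* Sketch only: indirect EMI write on 82579, following the hunk above. */
	static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
	{
		s32 ret_val;

		/* select the EMI register to write (e.g. I82579_LPI_UPDATE_TIMER) */
		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, addr);
		if (ret_val)
			return ret_val;

		/* then push the value through the EMI data register */
		return hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, data);
	}

	/* caller holds the PHY semaphore, mirroring the hunk above:
	 *	ret_val = hw->phy.ops.acquire(hw);
	 *	if (!ret_val) {
	 *		e1000_write_emi_reg_locked(hw, I82579_LPI_UPDATE_TIMER, 0x1387);
	 *		hw->phy.ops.release(hw);
	 *	}
	 */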
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 6d513a383340..a39d4a4d871c 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -54,7 +54,7 @@ #define DRV_EXTRAVERSION "-k2" -#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION +#define DRV_VERSION "1.3.10" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -900,8 +900,6 @@ next_desc: adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; - netdev->stats.rx_bytes += total_rx_bytes; - netdev->stats.rx_packets += total_rx_packets; return cleaned; } @@ -1060,8 +1058,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) } adapter->total_tx_bytes += total_tx_bytes; adapter->total_tx_packets += total_tx_packets; - netdev->stats.tx_bytes += total_tx_bytes; - netdev->stats.tx_packets += total_tx_packets; return count < tx_ring->count; } @@ -1248,8 +1244,6 @@ next_desc: adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; - netdev->stats.rx_bytes += total_rx_bytes; - netdev->stats.rx_packets += total_rx_packets; return cleaned; } @@ -1328,7 +1322,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* an error means any chain goes out the window * too */ if (rx_ring->rx_skb_top) - dev_kfree_skb(rx_ring->rx_skb_top); + dev_kfree_skb_irq(rx_ring->rx_skb_top); rx_ring->rx_skb_top = NULL; goto next_desc; } @@ -1401,7 +1395,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* eth type trans needs skb->data to point to something */ if (!pskb_may_pull(skb, ETH_HLEN)) { e_err("pskb_may_pull failed.\n"); - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); goto next_desc; } @@ -1429,8 +1423,6 @@ next_desc: adapter->total_rx_bytes += total_rx_bytes; adapter->total_rx_packets += total_rx_packets; - netdev->stats.rx_bytes += total_rx_bytes; - netdev->stats.rx_packets += total_rx_packets; return cleaned; } @@ -1857,7 +1849,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter) int err = 0, vector = 0; if (strlen(netdev->name) < (IFNAMSIZ - 5)) - sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); + snprintf(adapter->rx_ring->name, + sizeof(adapter->rx_ring->name) - 1, + "%s-rx-0", netdev->name); else memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, @@ -1870,7 +1864,9 @@ static int e1000_request_msix(struct e1000_adapter *adapter) vector++; if (strlen(netdev->name) < (IFNAMSIZ - 5)) - sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); + snprintf(adapter->tx_ring->name, + sizeof(adapter->tx_ring->name) - 1, + "%s-tx-0", netdev->name); else memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); err = request_irq(adapter->msix_entries[vector].vector, @@ -2734,7 +2730,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 rctl, rfctl; - u32 psrctl = 0; u32 pages = 0; /* Workaround Si errata on 82579 - configure jumbo frame flow */ @@ -2833,6 +2828,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) adapter->rx_ps_pages = 0; if (adapter->rx_ps_pages) { + u32 psrctl = 0; + /* Configure extra packet-split registers */ rfctl = er32(RFCTL); rfctl |= E1000_RFCTL_EXTEN; @@ -3034,7 +3031,6 @@ static void e1000_set_multi(struct net_device *netdev) struct netdev_hw_addr *ha; u8 *mta_list; u32 rctl; - int i; /* Check for Promiscuous and All Multicast modes */ @@ -3057,12 +3053,13 @@ static void 
e1000_set_multi(struct net_device *netdev) ew32(RCTL, rctl); if (!netdev_mc_empty(netdev)) { + int i = 0; + mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); if (!mta_list) return; /* prepare a packed array of only addresses. */ - i = 0; netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); @@ -3359,6 +3356,8 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter) e1e_flush(); } +static void e1000e_update_stats(struct e1000_adapter *adapter); + void e1000e_down(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3393,6 +3392,11 @@ void e1000e_down(struct e1000_adapter *adapter) del_timer_sync(&adapter->phy_info_timer); netif_carrier_off(netdev); + + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + adapter->link_speed = 0; adapter->link_duplex = 0; @@ -3437,6 +3441,8 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + spin_lock_init(&adapter->stats64_lock); + e1000e_set_interrupt_capability(adapter); if (e1000_alloc_queues(adapter)) @@ -3918,7 +3924,7 @@ release: * e1000e_update_stats - Update the board statistics counters * @adapter: board private structure **/ -void e1000e_update_stats(struct e1000_adapter *adapter) +static void e1000e_update_stats(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; @@ -4030,10 +4036,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_phy_regs *phy = &adapter->phy_regs; - int ret_val; if ((er32(STATUS) & E1000_STATUS_LU) && (adapter->hw.phy.media_type == e1000_media_type_copper)) { + int ret_val; + ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr); ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr); ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise); @@ -4179,7 +4186,6 @@ static void e1000_watchdog_task(struct work_struct *work) struct e1000_ring *tx_ring = adapter->tx_ring; struct e1000_hw *hw = &adapter->hw; u32 link, tctl; - int tx_pending = 0; if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -4320,7 +4326,9 @@ static void e1000_watchdog_task(struct work_struct *work) } link_up: + spin_lock(&adapter->stats64_lock); e1000e_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; adapter->tpt_old = adapter->stats.tpt; @@ -4334,20 +4342,17 @@ link_up: e1000e_update_adaptive(&adapter->hw); - if (!netif_carrier_ok(netdev)) { - tx_pending = (e1000_desc_unused(tx_ring) + 1 < - tx_ring->count); - if (tx_pending) { - /* - * We've lost link, so the controller stops DMA, - * but we've got queued Tx work that's never going - * to get done, so reset controller to flush Tx. - * (Do the reset outside of interrupt context). - */ - schedule_work(&adapter->reset_task); - /* return immediately since reset is imminent */ - return; - } + if (!netif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) { + /* + * We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
+ */ + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; } /* Simple mode for Interrupt Throttle Rate (ITR) */ @@ -4411,13 +4416,13 @@ static int e1000_tso(struct e1000_adapter *adapter, u32 cmd_length = 0; u16 ipcse = 0, tucse, mss; u8 ipcss, ipcso, tucss, tucso, hdr_len; - int err; if (!skb_is_gso(skb)) return 0; if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) return err; } @@ -4928,16 +4933,55 @@ static void e1000_reset_task(struct work_struct *work) } /** - * e1000_get_stats - Get System Network Statistics + * e1000_get_stats64 - Get System Network Statistics * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer * * Returns the address of the device statistics structure. - * The statistics are actually updated from the timer callback. **/ -static struct net_device_stats *e1000_get_stats(struct net_device *netdev) +struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { - /* only return the current stats */ - return &netdev->stats; + struct e1000_adapter *adapter = netdev_priv(netdev); + + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + /* Fill out the OS statistics structure */ + stats->rx_bytes = adapter->stats.gorc; + stats->rx_packets = adapter->stats.gprc; + stats->tx_bytes = adapter->stats.gotc; + stats->tx_packets = adapter->stats.gptc; + stats->multicast = adapter->stats.mprc; + stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* + * RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + stats->rx_crc_errors = adapter->stats.crcerrs; + stats->rx_frame_errors = adapter->stats.algnerrc; + stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + stats->tx_errors = adapter->stats.ecol + + adapter->stats.latecol; + stats->tx_aborted_errors = adapter->stats.ecol; + stats->tx_window_errors = adapter->stats.latecol; + stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + spin_unlock(&adapter->stats64_lock); + return stats; } /** @@ -5507,9 +5551,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data) { struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); - int vector, msix_irq; if (adapter->msix_entries) { + int vector, msix_irq; + vector = 0; msix_irq = adapter->msix_entries[vector].vector; disable_irq(msix_irq); @@ -5706,7 +5751,7 @@ static const struct net_device_ops e1000e_netdev_ops = { .ndo_open = e1000_open, .ndo_stop = e1000_close, .ndo_start_xmit = e1000_xmit_frame, - .ndo_get_stats = e1000_get_stats, + .ndo_get_stats64 = e1000e_get_stats64, .ndo_set_multicast_list = e1000_set_multi, .ndo_set_mac_address = e1000_set_mac, .ndo_change_mtu = e1000_change_mtu, diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 6bea051b134b..6ae31fcfb629 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -2409,9 +2409,7 @@ static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) { s32 
ret_val; - u32 page_select = 0; u32 page = offset >> IGP_PAGE_SHIFT; - u32 page_shift = 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) @@ -2427,6 +2425,8 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + /* * Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for @@ -2468,9 +2468,7 @@ out: s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) { s32 ret_val; - u32 page_select = 0; u32 page = offset >> IGP_PAGE_SHIFT; - u32 page_shift = 0; ret_val = hw->phy.ops.acquire(hw); if (ret_val) @@ -2486,6 +2484,8 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + /* * Page select is register 31 for phy address 1 and 22 for * phy address 2 and 3. Page select is shifted only for diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile index e7b6c31880ba..2e573be16c13 100644 --- a/drivers/net/enic/Makefile +++ b/drivers/net/enic/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_ENIC) := enic.o enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ - enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o + enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h index a937f49d9db7..3a3c3c8a3a9b 100644 --- a/drivers/net/enic/enic.h +++ b/drivers/net/enic/enic.h @@ -32,13 +32,13 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "1.4.1.10" -#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" +#define DRV_VERSION "2.1.1.12" +#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 -#define ENIC_WQ_MAX 8 -#define ENIC_RQ_MAX 8 +#define ENIC_WQ_MAX 1 +#define ENIC_RQ_MAX 1 #define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) @@ -49,7 +49,7 @@ struct enic_msix_entry { void *devid; }; -#define ENIC_SET_APPLIED (1 << 0) +#define ENIC_PORT_REQUEST_APPLIED (1 << 0) #define ENIC_SET_REQUEST (1 << 1) #define ENIC_SET_NAME (1 << 2) #define ENIC_SET_INSTANCE (1 << 3) @@ -101,7 +101,6 @@ struct enic { /* receive queue cache line section */ ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; unsigned int rq_count; - int (*rq_alloc_buf)(struct vnic_rq *rq); u64 rq_truncated_pkts; u64 rq_bad_fcs; struct napi_struct napi[ENIC_RQ_MAX]; diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c new file mode 100644 index 000000000000..37ad3a1c82ee --- /dev/null +++ b/drivers/net/enic/enic_dev.c @@ -0,0 +1,221 @@ +/* + * Copyright 2011 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#include <linux/pci.h> +#include <linux/etherdevice.h> + +#include "vnic_dev.h" +#include "vnic_vic.h" +#include "enic_res.h" +#include "enic.h" +#include "enic_dev.h" + +int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_fw_info(enic->vdev, fw_info); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_stats_dump(enic->vdev, vstats); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_add_station_addr(struct enic *enic) +{ + int err; + + if (!is_valid_ether_addr(enic->netdev->dev_addr)) + return -EADDRNOTAVAIL; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_del_station_addr(struct enic *enic) +{ + int err; + + if (!is_valid_ether_addr(enic->netdev->dev_addr)) + return -EADDRNOTAVAIL; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, + int broadcast, int promisc, int allmulti) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_packet_filter(enic->vdev, directed, + multicast, broadcast, promisc, allmulti); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_add_addr(struct enic *enic, u8 *addr) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_add_addr(enic->vdev, addr); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_del_addr(struct enic *enic, u8 *addr) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_del_addr(enic->vdev, addr); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_notify_unset(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_notify_unset(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_hang_notify(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_hang_notify(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, + IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_enable(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_enable_wait(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_disable(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_disable(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_vnic_dev_deinit(struct enic *enic) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_deinit(enic->vdev); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp) +{ + int err; + + 
spin_lock(&enic->devcmd_lock); + err = vnic_dev_init_prov(enic->vdev, + (u8 *)vp, vic_provinfo_size(vp)); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +int enic_dev_init_done(struct enic *enic, int *done, int *error) +{ + int err; + + spin_lock(&enic->devcmd_lock); + err = vnic_dev_init_done(enic->vdev, done, error); + spin_unlock(&enic->devcmd_lock); + + return err; +} + +/* rtnl lock is held */ +void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct enic *enic = netdev_priv(netdev); + + spin_lock(&enic->devcmd_lock); + enic_add_vlan(enic, vid); + spin_unlock(&enic->devcmd_lock); +} + +/* rtnl lock is held */ +void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct enic *enic = netdev_priv(netdev); + + spin_lock(&enic->devcmd_lock); + enic_del_vlan(enic, vid); + spin_unlock(&enic->devcmd_lock); +} diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h new file mode 100644 index 000000000000..495f57fcb887 --- /dev/null +++ b/drivers/net/enic/enic_dev.h @@ -0,0 +1,41 @@ +/* + * Copyright 2011 Cisco Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#ifndef _ENIC_DEV_H_ +#define _ENIC_DEV_H_ + +int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info); +int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats); +int enic_dev_add_station_addr(struct enic *enic); +int enic_dev_del_station_addr(struct enic *enic); +int enic_dev_packet_filter(struct enic *enic, int directed, int multicast, + int broadcast, int promisc, int allmulti); +int enic_dev_add_addr(struct enic *enic, u8 *addr); +int enic_dev_del_addr(struct enic *enic, u8 *addr); +void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid); +void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); +int enic_dev_notify_unset(struct enic *enic); +int enic_dev_hang_notify(struct enic *enic); +int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic); +int enic_dev_enable(struct enic *enic); +int enic_dev_disable(struct enic *enic); +int enic_vnic_dev_deinit(struct enic *enic); +int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp); +int enic_dev_init_done(struct enic *enic, int *done, int *error); + +#endif /* _ENIC_DEV_H_ */ diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index a0af48c51fb3..8b9cad5e9712 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -44,6 +44,7 @@ #include "vnic_vic.h" #include "enic_res.h" #include "enic.h" +#include "enic_dev.h" #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) @@ -190,18 +191,6 @@ static int enic_get_settings(struct net_device *netdev, return 0; } -static int enic_dev_fw_info(struct enic *enic, - struct vnic_devcmd_fw_info **fw_info) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_fw_info(enic->vdev, fw_info); - spin_unlock(&enic->devcmd_lock); - - return err; -} - static void enic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { @@ -246,17 +235,6 @@ static int enic_get_sset_count(struct net_device *netdev, int sset) } } -static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_stats_dump(enic->vdev, vstats); - spin_unlock(&enic->devcmd_lock); - - return err; -} - static void enic_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { @@ -896,9 +874,10 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev) return net_stats; } -static void enic_reset_multicast_list(struct enic *enic) +static void enic_reset_addr_lists(struct enic *enic) { enic->mc_count = 0; + enic->uc_count = 0; enic->flags = 0; } @@ -919,32 +898,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr) return 0; } -static int enic_dev_add_station_addr(struct enic *enic) -{ - int err = 0; - - if (is_valid_ether_addr(enic->netdev->dev_addr)) { - spin_lock(&enic->devcmd_lock); - err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr); - spin_unlock(&enic->devcmd_lock); - } - - return err; -} - -static int enic_dev_del_station_addr(struct enic *enic) -{ - int err = 0; - - if (is_valid_ether_addr(enic->netdev->dev_addr)) { - spin_lock(&enic->devcmd_lock); - err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr); - spin_unlock(&enic->devcmd_lock); - } - - return err; -} - static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p) { struct enic *enic = netdev_priv(netdev); @@ -989,42 +942,7 @@ static int enic_set_mac_address(struct net_device *netdev, void *p) return 
enic_dev_add_station_addr(enic); } -static int enic_dev_packet_filter(struct enic *enic, int directed, - int multicast, int broadcast, int promisc, int allmulti) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_packet_filter(enic->vdev, directed, - multicast, broadcast, promisc, allmulti); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static int enic_dev_add_addr(struct enic *enic, u8 *addr) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_add_addr(enic->vdev, addr); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static int enic_dev_del_addr(struct enic *enic, u8 *addr) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_del_addr(enic->vdev, addr); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static void enic_add_multicast_addr_list(struct enic *enic) +static void enic_update_multicast_addr_list(struct enic *enic) { struct net_device *netdev = enic->netdev; struct netdev_hw_addr *ha; @@ -1079,7 +997,7 @@ static void enic_add_multicast_addr_list(struct enic *enic) enic->mc_count = mc_count; } -static void enic_add_unicast_addr_list(struct enic *enic) +static void enic_update_unicast_addr_list(struct enic *enic) { struct net_device *netdev = enic->netdev; struct netdev_hw_addr *ha; @@ -1156,9 +1074,9 @@ static void enic_set_rx_mode(struct net_device *netdev) } if (!promisc) { - enic_add_unicast_addr_list(enic); + enic_update_unicast_addr_list(enic); if (!allmulti) - enic_add_multicast_addr_list(enic); + enic_update_multicast_addr_list(enic); } } @@ -1170,26 +1088,6 @@ static void enic_vlan_rx_register(struct net_device *netdev, enic->vlan_group = vlan_group; } -/* rtnl lock is held */ -static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) -{ - struct enic *enic = netdev_priv(netdev); - - spin_lock(&enic->devcmd_lock); - enic_add_vlan(enic, vid); - spin_unlock(&enic->devcmd_lock); -} - -/* rtnl lock is held */ -static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) -{ - struct enic *enic = netdev_priv(netdev); - - spin_lock(&enic->devcmd_lock); - enic_del_vlan(enic, vid); - spin_unlock(&enic->devcmd_lock); -} - /* netif_tx_lock held, BHs disabled */ static void enic_tx_timeout(struct net_device *netdev) { @@ -1197,40 +1095,6 @@ static void enic_tx_timeout(struct net_device *netdev) schedule_work(&enic->reset); } -static int enic_vnic_dev_deinit(struct enic *enic) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_deinit(enic->vdev); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_init_prov(enic->vdev, - (u8 *)vp, vic_provinfo_size(vp)); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static int enic_dev_init_done(struct enic *enic, int *done, int *error) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_init_done(enic->vdev, done, error); - spin_unlock(&enic->devcmd_lock); - - return err; -} - static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct enic *enic = netdev_priv(netdev); @@ -1262,6 +1126,8 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac) if (err) return err; + enic_reset_addr_lists(enic); + switch (enic->pp.request) { case PORT_REQUEST_ASSOCIATE: @@ -1318,18 +1184,20 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac) vic_provinfo_free(vp); if (err) return err; - - enic->pp.set |= ENIC_SET_APPLIED; break; case PORT_REQUEST_DISASSOCIATE: 
- enic->pp.set &= ~ENIC_SET_APPLIED; break; default: return -EINVAL; } + /* Set flag to indicate that the port assoc/disassoc + * request has been sent out to fw + */ + enic->pp.set |= ENIC_PORT_REQUEST_APPLIED; + return 0; } @@ -1379,9 +1247,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, if (is_zero_ether_addr(netdev->dev_addr)) random_ether_addr(netdev->dev_addr); - } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) { - if (!is_zero_ether_addr(enic->pp.mac_addr)) - enic_dev_del_addr(enic, enic->pp.mac_addr); } memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile)); @@ -1390,9 +1255,6 @@ static int enic_set_vf_port(struct net_device *netdev, int vf, if (err) goto set_port_profile_cleanup; - if (!is_zero_ether_addr(enic->pp.mac_addr)) - enic_dev_add_addr(enic, enic->pp.mac_addr); - set_port_profile_cleanup: memset(enic->pp.vf_mac, 0, ETH_ALEN); @@ -1411,7 +1273,7 @@ static int enic_get_vf_port(struct net_device *netdev, int vf, int err, error, done; u16 response = PORT_PROFILE_RESPONSE_SUCCESS; - if (!(enic->pp.set & ENIC_SET_APPLIED)) + if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED)) return -ENODATA; err = enic_dev_init_done(enic, &done, &error); @@ -1489,62 +1351,6 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq) return 0; } -static int enic_rq_alloc_buf_a1(struct vnic_rq *rq) -{ - struct rq_enet_desc *desc = vnic_rq_next_desc(rq); - - if (vnic_rq_posting_soon(rq)) { - - /* SW workaround for A0 HW erratum: if we're just about - * to write posted_index, insert a dummy desc - * of type resvd - */ - - rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0); - vnic_rq_post(rq, 0, 0, 0, 0); - } else { - return enic_rq_alloc_buf(rq); - } - - return 0; -} - -static int enic_dev_hw_version(struct enic *enic, - enum vnic_dev_hw_version *hw_ver) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_hw_version(enic->vdev, hw_ver); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static int enic_set_rq_alloc_buf(struct enic *enic) -{ - enum vnic_dev_hw_version hw_ver; - int err; - - err = enic_dev_hw_version(enic, &hw_ver); - if (err) - return err; - - switch (hw_ver) { - case VNIC_DEV_HW_VER_A1: - enic->rq_alloc_buf = enic_rq_alloc_buf_a1; - break; - case VNIC_DEV_HW_VER_A2: - case VNIC_DEV_HW_VER_UNKNOWN: - enic->rq_alloc_buf = enic_rq_alloc_buf; - break; - default: - return -ENODEV; - } - - return 0; -} - static void enic_rq_indicate_buf(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped, void *opaque) @@ -1681,7 +1487,7 @@ static int enic_poll(struct napi_struct *napi, int budget) 0 /* don't unmask intr */, 0 /* don't reset intr timer */); - err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); + err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); /* Buffer allocation failed. Stay in polling * mode so we can try to fill the ring again. @@ -1731,7 +1537,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget) 0 /* don't unmask intr */, 0 /* don't reset intr timer */); - err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf); + err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf); /* Buffer allocation failed. Stay in polling mode * so we can try to fill the ring again. 
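The enic_dev.c helpers introduced above all follow one convention: take the adapter's devcmd_lock spinlock, issue a single vnic_dev_* firmware command, and drop the lock, so concurrent callers cannot interleave posted devcmds. A template of that shape is sketched below; vnic_dev_my_cmd is a placeholder standing in for any real devcmd and is not an actual function in this patch.

	/* Template of the enic_dev.c convention shown above; the devcmd
	 * spinlock serializes every firmware command on this adapter. */
	int enic_dev_my_cmd(struct enic *enic)
	{
		int err;

		spin_lock(&enic->devcmd_lock);
		err = vnic_dev_my_cmd(enic->vdev);	/* placeholder vnic_dev_* call */
		spin_unlock(&enic->devcmd_lock);

		return err;
	}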
@@ -1901,39 +1707,6 @@ static int enic_dev_notify_set(struct enic *enic) return err; } -static int enic_dev_notify_unset(struct enic *enic) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_notify_unset(enic->vdev); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static int enic_dev_enable(struct enic *enic) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_enable_wait(enic->vdev); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static int enic_dev_disable(struct enic *enic) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_disable(enic->vdev); - spin_unlock(&enic->devcmd_lock); - - return err; -} - static void enic_notify_timer_start(struct enic *enic) { switch (vnic_dev_get_intr_mode(enic->vdev)) { @@ -1967,7 +1740,7 @@ static int enic_open(struct net_device *netdev) } for (i = 0; i < enic->rq_count; i++) { - vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); + vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); /* Need at least one buffer on ring to get going */ if (vnic_rq_desc_used(&enic->rq[i]) == 0) { netdev_err(netdev, "Unable to alloc receive buffers\n"); @@ -2285,29 +2058,6 @@ static int enic_set_rss_nic_cfg(struct enic *enic) rss_hash_bits, rss_base_cpu, rss_enable); } -static int enic_dev_hang_notify(struct enic *enic) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_hang_notify(enic->vdev); - spin_unlock(&enic->devcmd_lock); - - return err; -} - -static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic) -{ - int err; - - spin_lock(&enic->devcmd_lock); - err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev, - IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN); - spin_unlock(&enic->devcmd_lock); - - return err; -} - static void enic_reset(struct work_struct *work) { struct enic *enic = container_of(work, struct enic, reset); @@ -2320,7 +2070,7 @@ static void enic_reset(struct work_struct *work) enic_dev_hang_notify(enic); enic_stop(enic->netdev); enic_dev_hang_reset(enic); - enic_reset_multicast_list(enic); + enic_reset_addr_lists(enic); enic_init_vnic_resources(enic); enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); @@ -2332,7 +2082,7 @@ static void enic_reset(struct work_struct *work) static int enic_set_intr_mode(struct enic *enic) { unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX); - unsigned int m = 1; + unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX); unsigned int i; /* Set interrupt mode (INTx, MSI, MSI-X) depending @@ -2475,9 +2225,7 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { .ndo_tx_timeout = enic_tx_timeout, .ndo_set_vf_port = enic_set_vf_port, .ndo_get_vf_port = enic_get_vf_port, -#ifdef IFLA_VF_MAX .ndo_set_vf_mac = enic_set_vf_mac, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = enic_poll_controller, #endif @@ -2556,25 +2304,12 @@ static int enic_dev_init(struct enic *enic) enic_init_vnic_resources(enic); - err = enic_set_rq_alloc_buf(enic); - if (err) { - dev_err(dev, "Failed to set RQ buffer allocator, aborting\n"); - goto err_out_free_vnic_resources; - } - err = enic_set_rss_nic_cfg(enic); if (err) { dev_err(dev, "Failed to config nic, aborting\n"); goto err_out_free_vnic_resources; } - err = enic_dev_set_ig_vlan_rewrite_mode(enic); - if (err) { - dev_err(dev, - "Failed to set ingress vlan rewrite mode, aborting.\n"); - goto err_out_free_vnic_resources; - } - switch (vnic_dev_get_intr_mode(enic->vdev)) { default: netif_napi_add(netdev, &enic->napi[0], enic_poll, 64); @@ -2713,6 +2448,22 @@ 
static int __devinit enic_probe(struct pci_dev *pdev, goto err_out_vnic_unregister; } + /* Setup devcmd lock + */ + + spin_lock_init(&enic->devcmd_lock); + + /* + * Set ingress vlan rewrite mode before vnic initialization + */ + + err = enic_dev_set_ig_vlan_rewrite_mode(enic); + if (err) { + dev_err(dev, + "Failed to set ingress vlan rewrite mode, aborting.\n"); + goto err_out_dev_close; + } + /* Issue device init to initialize the vnic-to-switch link. * We'll start with carrier off and wait for link UP * notification later to turn on carrier. We don't need @@ -2736,11 +2487,6 @@ static int __devinit enic_probe(struct pci_dev *pdev, } } - /* Setup devcmd lock - */ - - spin_lock_init(&enic->devcmd_lock); - err = enic_dev_init(enic); if (err) { dev_err(dev, "Device initialization failed, aborting\n"); diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c index fb35d8b17668..c089b362a36f 100644 --- a/drivers/net/enic/vnic_dev.c +++ b/drivers/net/enic/vnic_dev.c @@ -408,10 +408,17 @@ int vnic_dev_fw_info(struct vnic_dev *vdev, if (!vdev->fw_info) return -ENOMEM; + memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info)); + a0 = vdev->fw_info_pa; + a1 = sizeof(struct vnic_devcmd_fw_info); /* only get fw_info once and cache it */ err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait); + if (err == ERR_ECMDUNKNOWN) { + err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD, + &a0, &a1, wait); + } } *fw_info = vdev->fw_info; @@ -419,25 +426,6 @@ int vnic_dev_fw_info(struct vnic_dev *vdev, return err; } -int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver) -{ - struct vnic_devcmd_fw_info *fw_info; - int err; - - err = vnic_dev_fw_info(vdev, &fw_info); - if (err) - return err; - - if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0) - *hw_ver = VNIC_DEV_HW_VER_A1; - else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0) - *hw_ver = VNIC_DEV_HW_VER_A2; - else - *hw_ver = VNIC_DEV_HW_VER_UNKNOWN; - - return 0; -} - int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, void *value) { diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h index 05f9a24cd459..e837546213a8 100644 --- a/drivers/net/enic/vnic_dev.h +++ b/drivers/net/enic/vnic_dev.h @@ -44,12 +44,6 @@ static inline void writeq(u64 val, void __iomem *reg) #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -enum vnic_dev_hw_version { - VNIC_DEV_HW_VER_UNKNOWN, - VNIC_DEV_HW_VER_A1, - VNIC_DEV_HW_VER_A2, -}; - enum vnic_dev_intr_mode { VNIC_DEV_INTR_MODE_UNKNOWN, VNIC_DEV_INTR_MODE_INTX, @@ -93,8 +87,6 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait); int vnic_dev_fw_info(struct vnic_dev *vdev, struct vnic_devcmd_fw_info **fw_info); -int vnic_dev_hw_version(struct vnic_dev *vdev, - enum vnic_dev_hw_version *hw_ver); int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size, void *value); int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats); diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h index 9abb3d51dea1..d833a071bac5 100644 --- a/drivers/net/enic/vnic_devcmd.h +++ b/drivers/net/enic/vnic_devcmd.h @@ -80,8 +80,34 @@ enum vnic_devcmd_cmd { CMD_NONE = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_NONE, 0), - /* mcpu fw info in mem: (u64)a0=paddr to struct vnic_devcmd_fw_info */ - CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + /* + * mcpu fw info in mem: + * in: + * (u64)a0=paddr to struct vnic_devcmd_fw_info + * 
action: + * Fills in struct vnic_devcmd_fw_info (128 bytes) + * note: + * An old definition of CMD_MCPU_FW_INFO + */ + CMD_MCPU_FW_INFO_OLD = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 1), + + /* + * mcpu fw info in mem: + * in: + * (u64)a0=paddr to struct vnic_devcmd_fw_info + * (u16)a1=size of the structure + * out: + * (u16)a1=0 for in:a1 = 0, + * data size actually written for other values. + * action: + * Fills in first 128 bytes of vnic_devcmd_fw_info for in:a1 = 0, + * first in:a1 bytes for 0 < in:a1 <= 132, + * 132 bytes for other values of in:a1. + * note: + * CMD_MCPU_FW_INFO and CMD_MCPU_FW_INFO_OLD have the same enum 1 + * for source compatibility. + */ + CMD_MCPU_FW_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 1), /* dev-specific block member: * in: (u16)a0=offset,(u8)a1=size @@ -291,11 +317,19 @@ enum vnic_devcmd_error { ERR_EMAXRES = 10, }; +/* + * note: hw_version and asic_rev refer to the same thing, + * but have different formats. hw_version is + * a 32-byte string (e.g. "A2") and asic_rev is + * a 16-bit integer (e.g. 0xA2). + */ struct vnic_devcmd_fw_info { char fw_version[32]; char fw_build[32]; char hw_version[32]; char hw_serial_number[32]; + u16 asic_type; + u16 asic_rev; }; struct vnic_devcmd_notify { diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h index 37f08de2454a..2056586f4d4b 100644 --- a/drivers/net/enic/vnic_rq.h +++ b/drivers/net/enic/vnic_rq.h @@ -141,11 +141,6 @@ static inline void vnic_rq_post(struct vnic_rq *rq, } } -static inline int vnic_rq_posting_soon(struct vnic_rq *rq) -{ - return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0; -} - static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count) { rq->ring.desc_avail += count; diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 0cb1cf9cf4b0..a59cf961a436 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -111,6 +111,8 @@ * Sorry, I had to rewrite most of this for 2.5.x -DaveM */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/capability.h> #include <linux/module.h> #include <linux/kernel.h> @@ -162,7 +164,7 @@ static void eql_timer(unsigned long param) } static const char version[] __initconst = - "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n"; + "Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)"; static const struct net_device_ops eql_netdev_ops = { .ndo_open = eql_open, @@ -204,8 +206,8 @@ static int eql_open(struct net_device *dev) equalizer_t *eql = netdev_priv(dev); /* XXX We should force this off automatically for the user. */ - printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on " - "your slave devices.\n", dev->name); + netdev_info(dev, + "remember to turn off Van-Jacobson compression on your slave devices\n"); BUG_ON(!list_empty(&eql->queue.all_slaves)); @@ -591,7 +593,7 @@ static int __init eql_init_module(void) { int err; - printk(version); + pr_info("%s\n", version); dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup); if (!dev_eql) diff --git a/drivers/net/fec.c b/drivers/net/fec.c index cd0282d5d40f..885d8baff7d5 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -54,7 +54,7 @@ #include "fec.h" -#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) +#if defined(CONFIG_ARM) #define FEC_ALIGNMENT 0xf #else #define FEC_ALIGNMENT 0x3 @@ -148,8 +148,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); * account when setting it. 
*/ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ - defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) #else #define OPT_FRAME_SIZE 0 @@ -184,7 +183,7 @@ struct fec_enet_private { struct bufdesc *rx_bd_base; struct bufdesc *tx_bd_base; /* The next free ring entry */ - struct bufdesc *cur_rx, *cur_tx; + struct bufdesc *cur_rx, *cur_tx; /* The ring entries to be free()ed */ struct bufdesc *dirty_tx; @@ -192,28 +191,21 @@ struct fec_enet_private { /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ spinlock_t hw_lock; - struct platform_device *pdev; + struct platform_device *pdev; int opened; /* Phylib and MDIO interface */ - struct mii_bus *mii_bus; - struct phy_device *phy_dev; - int mii_timeout; - uint phy_speed; + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + int mii_timeout; + uint phy_speed; phy_interface_t phy_interface; int link; int full_duplex; struct completion mdio_done; }; -static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); -static void fec_enet_tx(struct net_device *dev); -static void fec_enet_rx(struct net_device *dev); -static int fec_enet_close(struct net_device *dev); -static void fec_restart(struct net_device *dev, int duplex); -static void fec_stop(struct net_device *dev); - /* FEC MII MMFR bits definition */ #define FEC_MMFR_ST (1 << 30) #define FEC_MMFR_OP_READ (2 << 28) @@ -240,9 +232,9 @@ static void *swap_buffer(void *bufaddr, int len) } static netdev_tx_t -fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); struct bufdesc *bdp; @@ -263,9 +255,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (status & BD_ENET_TX_READY) { /* Ooops. All transmit buffers are full. Bail out. - * This should not happen, since dev->tbusy should be set. + * This should not happen, since ndev->tbusy should be set. */ - printk("%s: tx queue full!.\n", dev->name); + printk("%s: tx queue full!.\n", ndev->name); spin_unlock_irqrestore(&fep->hw_lock, flags); return NETDEV_TX_BUSY; } @@ -285,7 +277,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { unsigned int index; index = bdp - fep->tx_bd_base; - memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len); + memcpy(fep->tx_bounce[index], skb->data, skb->len); bufaddr = fep->tx_bounce[index]; } @@ -300,13 +292,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Save skb pointer */ fep->tx_skbuff[fep->skb_cur] = skb; - dev->stats.tx_bytes += skb->len; + ndev->stats.tx_bytes += skb->len; fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK; /* Push the data cache so the CPM does not get stale memory * data. */ - bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr, + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); /* Send it on its way. 
Tell FEC it's ready, interrupt when done, @@ -327,7 +319,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (bdp == fep->dirty_tx) { fep->tx_full = 1; - netif_stop_queue(dev); + netif_stop_queue(ndev); } fep->cur_tx = bdp; @@ -337,62 +329,170 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } +/* This function is called to start or restart the FEC during a link + * change. This only happens when switching between half and full + * duplex. + */ static void -fec_timeout(struct net_device *dev) +fec_restart(struct net_device *ndev, int duplex) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); + int i; + u32 temp_mac[2]; + u32 rcntl = OPT_FRAME_SIZE | 0x04; - dev->stats.tx_errors++; + /* Whack a reset. We should wait for this. */ + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); - fec_restart(dev, fep->full_duplex); - netif_wake_queue(dev); -} + /* + * enet-mac reset will reset mac address registers too, + * so need to reconfigure it. + */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); + writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); + writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); + } -static irqreturn_t -fec_enet_interrupt(int irq, void * dev_id) -{ - struct net_device *dev = dev_id; - struct fec_enet_private *fep = netdev_priv(dev); - uint int_events; - irqreturn_t ret = IRQ_NONE; + /* Clear any outstanding interrupt. */ + writel(0xffc00000, fep->hwp + FEC_IEVENT); - do { - int_events = readl(fep->hwp + FEC_IEVENT); - writel(int_events, fep->hwp + FEC_IEVENT); + /* Reset all multicast. */ + writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); +#ifndef CONFIG_M5272 + writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_HASH_TABLE_LOW); +#endif - if (int_events & FEC_ENET_RXF) { - ret = IRQ_HANDLED; - fec_enet_rx(dev); - } + /* Set maximum receive buffer size. */ + writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); - /* Transmit OK, or non-fatal error. Update the buffer - * descriptors. FEC handles all errors, we just discover - * them as part of the transmit process. - */ - if (int_events & FEC_ENET_TXF) { - ret = IRQ_HANDLED; - fec_enet_tx(dev); + /* Set receive and transmit descriptor base. */ + writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); + writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, + fep->hwp + FEC_X_DES_START); + + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->cur_rx = fep->rx_bd_base; + + /* Reset SKB transmit buffers. */ + fep->skb_cur = fep->skb_dirty = 0; + for (i = 0; i <= TX_RING_MOD_MASK; i++) { + if (fep->tx_skbuff[i]) { + dev_kfree_skb_any(fep->tx_skbuff[i]); + fep->tx_skbuff[i] = NULL; } + } - if (int_events & FEC_ENET_MII) { - ret = IRQ_HANDLED; - complete(&fep->mdio_done); + /* Enable MII mode */ + if (duplex) { + /* FD enable */ + writel(0x04, fep->hwp + FEC_X_CNTRL); + } else { + /* No Rcv on Xmit */ + rcntl |= 0x02; + writel(0x0, fep->hwp + FEC_X_CNTRL); + } + + fep->full_duplex = duplex; + + /* Set MII speed */ + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + + /* + * The phy interface and speed need to get configured + * differently on enet-mac. 
+ */ + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + /* Enable flow control and length check */ + rcntl |= 0x40000000 | 0x00000020; + + /* MII or RMII */ + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) + rcntl |= (1 << 8); + else + rcntl &= ~(1 << 8); + + /* 10M or 100M */ + if (fep->phy_dev && fep->phy_dev->speed == SPEED_100) + rcntl &= ~(1 << 9); + else + rcntl |= (1 << 9); + + } else { +#ifdef FEC_MIIGSK_ENR + if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { + /* disable the gasket and wait */ + writel(0, fep->hwp + FEC_MIIGSK_ENR); + while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) + udelay(1); + + /* + * configure the gasket: + * RMII, 50 MHz, no loopback, no echo + */ + writel(1, fep->hwp + FEC_MIIGSK_CFGR); + + /* re-enable the gasket */ + writel(2, fep->hwp + FEC_MIIGSK_ENR); } - } while (int_events); +#endif + } + writel(rcntl, fep->hwp + FEC_R_CNTRL); - return ret; + /* And last, enable the transmit and receive processing */ + writel(2, fep->hwp + FEC_ECNTRL); + writel(0, fep->hwp + FEC_R_DES_ACTIVE); + + /* Enable interrupts we wish to service */ + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); +} + +static void +fec_stop(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + /* We cannot expect a graceful transmit stop without link !!! */ + if (fep->link) { + writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ + udelay(10); + if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) + printk("fec_stop : Graceful transmit stop did not complete !\n"); + } + + /* Whack a reset. We should wait for this. */ + writel(1, fep->hwp + FEC_ECNTRL); + udelay(10); + writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); + writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); } static void -fec_enet_tx(struct net_device *dev) +fec_timeout(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + ndev->stats.tx_errors++; + + fec_restart(ndev, fep->full_duplex); + netif_wake_queue(ndev); +} + +static void +fec_enet_tx(struct net_device *ndev) { struct fec_enet_private *fep; struct bufdesc *bdp; unsigned short status; struct sk_buff *skb; - fep = netdev_priv(dev); + fep = netdev_priv(ndev); spin_lock(&fep->hw_lock); bdp = fep->dirty_tx; @@ -400,7 +500,8 @@ fec_enet_tx(struct net_device *dev) if (bdp == fep->cur_tx && fep->tx_full == 0) break; - dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); bdp->cbd_bufaddr = 0; skb = fep->tx_skbuff[fep->skb_dirty]; @@ -408,19 +509,19 @@ fec_enet_tx(struct net_device *dev) if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { - dev->stats.tx_errors++; + ndev->stats.tx_errors++; if (status & BD_ENET_TX_HB) /* No heartbeat */ - dev->stats.tx_heartbeat_errors++; + ndev->stats.tx_heartbeat_errors++; if (status & BD_ENET_TX_LC) /* Late collision */ - dev->stats.tx_window_errors++; + ndev->stats.tx_window_errors++; if (status & BD_ENET_TX_RL) /* Retrans limit */ - dev->stats.tx_aborted_errors++; + ndev->stats.tx_aborted_errors++; if (status & BD_ENET_TX_UN) /* Underrun */ - dev->stats.tx_fifo_errors++; + ndev->stats.tx_fifo_errors++; if (status & BD_ENET_TX_CSL) /* Carrier lost */ - dev->stats.tx_carrier_errors++; + ndev->stats.tx_carrier_errors++; } else { - dev->stats.tx_packets++; + ndev->stats.tx_packets++; } if (status & BD_ENET_TX_READY) @@ -430,7 +531,7 @@ fec_enet_tx(struct net_device *dev) * but we eventually sent the packet 
OK. */ if (status & BD_ENET_TX_DEF) - dev->stats.collisions++; + ndev->stats.collisions++; /* Free the sk buffer associated with this last transmit */ dev_kfree_skb_any(skb); @@ -447,8 +548,8 @@ fec_enet_tx(struct net_device *dev) */ if (fep->tx_full) { fep->tx_full = 0; - if (netif_queue_stopped(dev)) - netif_wake_queue(dev); + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); } } fep->dirty_tx = bdp; @@ -462,9 +563,9 @@ fec_enet_tx(struct net_device *dev) * effectively tossing the packet. */ static void -fec_enet_rx(struct net_device *dev) +fec_enet_rx(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); struct bufdesc *bdp; @@ -498,17 +599,17 @@ fec_enet_rx(struct net_device *dev) /* Check for errors. */ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { - dev->stats.rx_errors++; + ndev->stats.rx_errors++; if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { /* Frame too long or too short. */ - dev->stats.rx_length_errors++; + ndev->stats.rx_length_errors++; } if (status & BD_ENET_RX_NO) /* Frame alignment */ - dev->stats.rx_frame_errors++; + ndev->stats.rx_frame_errors++; if (status & BD_ENET_RX_CR) /* CRC Error */ - dev->stats.rx_crc_errors++; + ndev->stats.rx_crc_errors++; if (status & BD_ENET_RX_OV) /* FIFO overrun */ - dev->stats.rx_fifo_errors++; + ndev->stats.rx_fifo_errors++; } /* Report late collisions as a frame error. @@ -516,19 +617,19 @@ fec_enet_rx(struct net_device *dev) * have in the buffer. So, just drop this frame on the floor. */ if (status & BD_ENET_RX_CL) { - dev->stats.rx_errors++; - dev->stats.rx_frame_errors++; + ndev->stats.rx_errors++; + ndev->stats.rx_frame_errors++; goto rx_processing_done; } /* Process the incoming frame. 
*/ - dev->stats.rx_packets++; + ndev->stats.rx_packets++; pkt_len = bdp->cbd_datlen; - dev->stats.rx_bytes += pkt_len; + ndev->stats.rx_bytes += pkt_len; data = (__u8*)__va(bdp->cbd_bufaddr); - dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, - DMA_FROM_DEVICE); + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) swap_buffer(data, pkt_len); @@ -542,18 +643,18 @@ fec_enet_rx(struct net_device *dev) if (unlikely(!skb)) { printk("%s: Memory squeeze, dropping packet.\n", - dev->name); - dev->stats.rx_dropped++; + ndev->name); + ndev->stats.rx_dropped++; } else { skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, pkt_len - 4); /* Make room */ skb_copy_to_linear_data(skb, data, pkt_len - 4); - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, ndev); netif_rx(skb); } - bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen, - DMA_FROM_DEVICE); + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data, + FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE); rx_processing_done: /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; @@ -578,10 +679,47 @@ rx_processing_done: spin_unlock(&fep->hw_lock); } +static irqreturn_t +fec_enet_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct fec_enet_private *fep = netdev_priv(ndev); + uint int_events; + irqreturn_t ret = IRQ_NONE; + + do { + int_events = readl(fep->hwp + FEC_IEVENT); + writel(int_events, fep->hwp + FEC_IEVENT); + + if (int_events & FEC_ENET_RXF) { + ret = IRQ_HANDLED; + fec_enet_rx(ndev); + } + + /* Transmit OK, or non-fatal error. Update the buffer + * descriptors. FEC handles all errors, we just discover + * them as part of the transmit process. 
+ */ + if (int_events & FEC_ENET_TXF) { + ret = IRQ_HANDLED; + fec_enet_tx(ndev); + } + + if (int_events & FEC_ENET_MII) { + ret = IRQ_HANDLED; + complete(&fep->mdio_done); + } + } while (int_events); + + return ret; +} + + + /* ------------------------------------------------------------------------- */ -static void __inline__ fec_get_mac(struct net_device *dev) +static void __inline__ fec_get_mac(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct fec_platform_data *pdata = fep->pdev->dev.platform_data; unsigned char *iap, tmpaddr[ETH_ALEN]; @@ -617,11 +755,11 @@ static void __inline__ fec_get_mac(struct net_device *dev) iap = &tmpaddr[0]; } - memcpy(dev->dev_addr, iap, ETH_ALEN); + memcpy(ndev->dev_addr, iap, ETH_ALEN); /* Adjust MAC if using macaddr */ if (iap == macaddr) - dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; + ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; } /* ------------------------------------------------------------------------- */ @@ -629,9 +767,9 @@ static void __inline__ fec_get_mac(struct net_device *dev) /* * Phy section */ -static void fec_enet_adjust_link(struct net_device *dev) +static void fec_enet_adjust_link(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phy_dev = fep->phy_dev; unsigned long flags; @@ -648,7 +786,7 @@ static void fec_enet_adjust_link(struct net_device *dev) /* Duplex link change */ if (phy_dev->link) { if (fep->full_duplex != phy_dev->duplex) { - fec_restart(dev, phy_dev->duplex); + fec_restart(ndev, phy_dev->duplex); status_change = 1; } } @@ -657,9 +795,9 @@ static void fec_enet_adjust_link(struct net_device *dev) if (phy_dev->link != fep->link) { fep->link = phy_dev->link; if (phy_dev->link) - fec_restart(dev, phy_dev->duplex); + fec_restart(ndev, phy_dev->duplex); else - fec_stop(dev); + fec_stop(ndev); status_change = 1; } @@ -728,9 +866,9 @@ static int fec_enet_mdio_reset(struct mii_bus *bus) return 0; } -static int fec_enet_mii_probe(struct net_device *dev) +static int fec_enet_mii_probe(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phy_dev = NULL; char mdio_bus_id[MII_BUS_ID_SIZE]; char phy_name[MII_BUS_ID_SIZE + 3]; @@ -755,16 +893,16 @@ static int fec_enet_mii_probe(struct net_device *dev) if (phy_id >= PHY_MAX_ADDR) { printk(KERN_INFO "%s: no PHY, assuming direct connection " - "to switch\n", dev->name); + "to switch\n", ndev->name); strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); phy_id = 0; } snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); - phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0, + phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (IS_ERR(phy_dev)) { - printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); + printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name); return PTR_ERR(phy_dev); } @@ -777,7 +915,7 @@ static int fec_enet_mii_probe(struct net_device *dev) fep->full_duplex = 0; printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, + "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), fep->phy_dev->irq); @@ -787,8 +925,8 @@ static int fec_enet_mii_probe(struct net_device *dev) static int fec_enet_mii_init(struct 
platform_device *pdev) { static struct mii_bus *fec0_mii_bus; - struct net_device *dev = platform_get_drvdata(pdev); - struct fec_enet_private *fep = netdev_priv(dev); + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); int err = -ENXIO, i; @@ -846,8 +984,6 @@ static int fec_enet_mii_init(struct platform_device *pdev) for (i = 0; i < PHY_MAX_ADDR; i++) fep->mii_bus->irq[i] = PHY_POLL; - platform_set_drvdata(dev, fep->mii_bus); - if (mdiobus_register(fep->mii_bus)) goto err_out_free_mdio_irq; @@ -874,10 +1010,10 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep) mdiobus_free(fep->mii_bus); } -static int fec_enet_get_settings(struct net_device *dev, +static int fec_enet_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phydev = fep->phy_dev; if (!phydev) @@ -886,10 +1022,10 @@ static int fec_enet_get_settings(struct net_device *dev, return phy_ethtool_gset(phydev, cmd); } -static int fec_enet_set_settings(struct net_device *dev, +static int fec_enet_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phydev = fep->phy_dev; if (!phydev) @@ -898,14 +1034,14 @@ static int fec_enet_set_settings(struct net_device *dev, return phy_ethtool_sset(phydev, cmd); } -static void fec_enet_get_drvinfo(struct net_device *dev, +static void fec_enet_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); strcpy(info->driver, fep->pdev->dev.driver->name); strcpy(info->version, "Revision: 1.0"); - strcpy(info->bus_info, dev_name(&dev->dev)); + strcpy(info->bus_info, dev_name(&ndev->dev)); } static struct ethtool_ops fec_enet_ethtool_ops = { @@ -915,12 +1051,12 @@ static struct ethtool_ops fec_enet_ethtool_ops = { .get_link = ethtool_op_get_link, }; -static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phydev = fep->phy_dev; - if (!netif_running(dev)) + if (!netif_running(ndev)) return -EINVAL; if (!phydev) @@ -929,9 +1065,9 @@ static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(phydev, rq, cmd); } -static void fec_enet_free_buffers(struct net_device *dev) +static void fec_enet_free_buffers(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); int i; struct sk_buff *skb; struct bufdesc *bdp; @@ -941,7 +1077,7 @@ static void fec_enet_free_buffers(struct net_device *dev) skb = fep->rx_skbuff[i]; if (bdp->cbd_bufaddr) - dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, + dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); if (skb) dev_kfree_skb(skb); @@ -953,9 +1089,9 @@ static void fec_enet_free_buffers(struct net_device *dev) kfree(fep->tx_bounce[i]); } -static int fec_enet_alloc_buffers(struct net_device *dev) +static int fec_enet_alloc_buffers(struct net_device *ndev) { - struct fec_enet_private *fep = 
netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); int i; struct sk_buff *skb; struct bufdesc *bdp; @@ -964,12 +1100,12 @@ static int fec_enet_alloc_buffers(struct net_device *dev) for (i = 0; i < RX_RING_SIZE; i++) { skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); if (!skb) { - fec_enet_free_buffers(dev); + fec_enet_free_buffers(ndev); return -ENOMEM; } fep->rx_skbuff[i] = skb; - bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, + bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); bdp->cbd_sc = BD_ENET_RX_EMPTY; bdp++; @@ -996,45 +1132,47 @@ static int fec_enet_alloc_buffers(struct net_device *dev) } static int -fec_enet_open(struct net_device *dev) +fec_enet_open(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); int ret; /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. */ - ret = fec_enet_alloc_buffers(dev); + ret = fec_enet_alloc_buffers(ndev); if (ret) return ret; /* Probe and connect to PHY when open the interface */ - ret = fec_enet_mii_probe(dev); + ret = fec_enet_mii_probe(ndev); if (ret) { - fec_enet_free_buffers(dev); + fec_enet_free_buffers(ndev); return ret; } phy_start(fep->phy_dev); - netif_start_queue(dev); + netif_start_queue(ndev); fep->opened = 1; return 0; } static int -fec_enet_close(struct net_device *dev) +fec_enet_close(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); /* Don't know what to do yet. */ fep->opened = 0; - netif_stop_queue(dev); - fec_stop(dev); + netif_stop_queue(ndev); + fec_stop(ndev); - if (fep->phy_dev) + if (fep->phy_dev) { + phy_stop(fep->phy_dev); phy_disconnect(fep->phy_dev); + } - fec_enet_free_buffers(dev); + fec_enet_free_buffers(ndev); return 0; } @@ -1052,14 +1190,14 @@ fec_enet_close(struct net_device *dev) #define HASH_BITS 6 /* #bits in hash */ #define CRC32_POLY 0xEDB88320 -static void set_multicast_list(struct net_device *dev) +static void set_multicast_list(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct netdev_hw_addr *ha; unsigned int i, bit, data, crc, tmp; unsigned char hash; - if (dev->flags & IFF_PROMISC) { + if (ndev->flags & IFF_PROMISC) { tmp = readl(fep->hwp + FEC_R_CNTRL); tmp |= 0x8; writel(tmp, fep->hwp + FEC_R_CNTRL); @@ -1070,7 +1208,7 @@ static void set_multicast_list(struct net_device *dev) tmp &= ~0x8; writel(tmp, fep->hwp + FEC_R_CNTRL); - if (dev->flags & IFF_ALLMULTI) { + if (ndev->flags & IFF_ALLMULTI) { /* Catch all multicast addresses, so set the * filter to all 1's */ @@ -1085,7 +1223,7 @@ static void set_multicast_list(struct net_device *dev) writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - netdev_for_each_mc_addr(ha, dev) { + netdev_for_each_mc_addr(ha, ndev) { /* Only support group multicast for now */ if (!(ha->addr[0] & 1)) continue; @@ -1093,7 +1231,7 @@ static void set_multicast_list(struct net_device *dev) /* calculate crc32 value of mac address */ crc = 0xffffffff; - for (i = 0; i < dev->addr_len; i++) { + for (i = 0; i < ndev->addr_len; i++) { data = ha->addr[i]; for (bit = 0; bit < 8; bit++, data >>= 1) { crc = (crc >> 1) ^ @@ -1120,20 +1258,20 @@ static void set_multicast_list(struct net_device *dev) /* Set a MAC change in hardware. 
*/ static int -fec_set_mac_address(struct net_device *dev, void *p) +fec_set_mac_address(struct net_device *ndev, void *p) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) | - (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), + writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | + (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), fep->hwp + FEC_ADDR_LOW); - writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), + writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), fep->hwp + FEC_ADDR_HIGH); return 0; } @@ -1147,16 +1285,16 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, - .ndo_do_ioctl = fec_enet_ioctl, + .ndo_do_ioctl = fec_enet_ioctl, }; /* * XXX: We need to clean up on failure exits here. * */ -static int fec_enet_init(struct net_device *dev) +static int fec_enet_init(struct net_device *ndev) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = netdev_priv(ndev); struct bufdesc *cbd_base; struct bufdesc *bdp; int i; @@ -1171,20 +1309,19 @@ static int fec_enet_init(struct net_device *dev) spin_lock_init(&fep->hw_lock); - fep->hwp = (void __iomem *)dev->base_addr; - fep->netdev = dev; + fep->netdev = ndev; /* Get the Ethernet address */ - fec_get_mac(dev); + fec_get_mac(ndev); /* Set receive and transmit descriptor base. */ fep->rx_bd_base = cbd_base; fep->tx_bd_base = cbd_base + RX_RING_SIZE; /* The FEC Ethernet specific entries in the device structure */ - dev->watchdog_timeo = TX_TIMEOUT; - dev->netdev_ops = &fec_netdev_ops; - dev->ethtool_ops = &fec_enet_ethtool_ops; + ndev->watchdog_timeo = TX_TIMEOUT; + ndev->netdev_ops = &fec_netdev_ops; + ndev->ethtool_ops = &fec_enet_ethtool_ops; /* Initialize the receive buffer descriptors. */ bdp = fep->rx_bd_base; @@ -1213,152 +1350,11 @@ static int fec_enet_init(struct net_device *dev) bdp--; bdp->cbd_sc |= BD_SC_WRAP; - fec_restart(dev, 0); + fec_restart(ndev, 0); return 0; } -/* This function is called to start or restart the FEC during a link - * change. This only happens when switching between half and full - * duplex. - */ -static void -fec_restart(struct net_device *dev, int duplex) -{ - struct fec_enet_private *fep = netdev_priv(dev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - int i; - u32 val, temp_mac[2]; - - /* Whack a reset. We should wait for this. */ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - - /* - * enet-mac reset will reset mac address registers too, - * so need to reconfigure it. - */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { - memcpy(&temp_mac, dev->dev_addr, ETH_ALEN); - writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); - writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); - } - - /* Clear any outstanding interrupt. */ - writel(0xffc00000, fep->hwp + FEC_IEVENT); - - /* Reset all multicast. */ - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); -#ifndef CONFIG_M5272 - writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_HASH_TABLE_LOW); -#endif - - /* Set maximum receive buffer size. 
*/ - writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); - - /* Set receive and transmit descriptor base. */ - writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, - fep->hwp + FEC_X_DES_START); - - fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; - fep->cur_rx = fep->rx_bd_base; - - /* Reset SKB transmit buffers. */ - fep->skb_cur = fep->skb_dirty = 0; - for (i = 0; i <= TX_RING_MOD_MASK; i++) { - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - } - - /* Enable MII mode */ - if (duplex) { - /* MII enable / FD enable */ - writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL); - writel(0x04, fep->hwp + FEC_X_CNTRL); - } else { - /* MII enable / No Rcv on Xmit */ - writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL); - writel(0x0, fep->hwp + FEC_X_CNTRL); - } - fep->full_duplex = duplex; - - /* Set MII speed */ - writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); - - /* - * The phy interface and speed need to get configured - * differently on enet-mac. - */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { - val = readl(fep->hwp + FEC_R_CNTRL); - - /* MII or RMII */ - if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) - val |= (1 << 8); - else - val &= ~(1 << 8); - - /* 10M or 100M */ - if (fep->phy_dev && fep->phy_dev->speed == SPEED_100) - val &= ~(1 << 9); - else - val |= (1 << 9); - - writel(val, fep->hwp + FEC_R_CNTRL); - } else { -#ifdef FEC_MIIGSK_ENR - if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { - /* disable the gasket and wait */ - writel(0, fep->hwp + FEC_MIIGSK_ENR); - while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) - udelay(1); - - /* - * configure the gasket: - * RMII, 50 MHz, no loopback, no echo - */ - writel(1, fep->hwp + FEC_MIIGSK_CFGR); - - /* re-enable the gasket */ - writel(2, fep->hwp + FEC_MIIGSK_ENR); - } -#endif - } - - /* And last, enable the transmit and receive processing */ - writel(2, fep->hwp + FEC_ECNTRL); - writel(0, fep->hwp + FEC_R_DES_ACTIVE); - - /* Enable interrupts we wish to service */ - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); -} - -static void -fec_stop(struct net_device *dev) -{ - struct fec_enet_private *fep = netdev_priv(dev); - - /* We cannot expect a graceful transmit stop without link !!! */ - if (fep->link) { - writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ - udelay(10); - if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) - printk("fec_stop : Graceful transmit stop did not complete !\n"); - } - - /* Whack a reset. We should wait for this. 
*/ - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); - writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); -} - static int __devinit fec_probe(struct platform_device *pdev) { @@ -1378,19 +1374,20 @@ fec_probe(struct platform_device *pdev) /* Init network device */ ndev = alloc_etherdev(sizeof(struct fec_enet_private)); - if (!ndev) - return -ENOMEM; + if (!ndev) { + ret = -ENOMEM; + goto failed_alloc_etherdev; + } SET_NETDEV_DEV(ndev, &pdev->dev); /* setup board info structure */ fep = netdev_priv(ndev); - memset(fep, 0, sizeof(*fep)); - ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); + fep->hwp = ioremap(r->start, resource_size(r)); fep->pdev = pdev; - if (!ndev->base_addr) { + if (!fep->hwp) { ret = -ENOMEM; goto failed_ioremap; } @@ -1408,10 +1405,9 @@ fec_probe(struct platform_device *pdev) break; ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); if (ret) { - while (i >= 0) { + while (--i >= 0) { irq = platform_get_irq(pdev, i); free_irq(irq, ndev); - i--; } goto failed_irq; } @@ -1454,9 +1450,11 @@ failed_clk: free_irq(irq, ndev); } failed_irq: - iounmap((void __iomem *)ndev->base_addr); + iounmap(fep->hwp); failed_ioremap: free_netdev(ndev); +failed_alloc_etherdev: + release_mem_region(r->start, resource_size(r)); return ret; } @@ -1466,16 +1464,22 @@ fec_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - - platform_set_drvdata(pdev, NULL); + struct resource *r; fec_stop(ndev); fec_enet_mii_remove(fep); clk_disable(fep->clk); clk_put(fep->clk); - iounmap((void __iomem *)ndev->base_addr); + iounmap(fep->hwp); unregister_netdev(ndev); free_netdev(ndev); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + BUG_ON(!r); + release_mem_region(r->start, resource_size(r)); + + platform_set_drvdata(pdev, NULL); + return 0; } @@ -1484,16 +1488,14 @@ static int fec_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - struct fec_enet_private *fep; + struct fec_enet_private *fep = netdev_priv(ndev); - if (ndev) { - fep = netdev_priv(ndev); - if (netif_running(ndev)) { - fec_stop(ndev); - netif_device_detach(ndev); - } - clk_disable(fep->clk); + if (netif_running(ndev)) { + fec_stop(ndev); + netif_device_detach(ndev); } + clk_disable(fep->clk); + return 0; } @@ -1501,16 +1503,14 @@ static int fec_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - struct fec_enet_private *fep; + struct fec_enet_private *fep = netdev_priv(ndev); - if (ndev) { - fep = netdev_priv(ndev); - clk_enable(fep->clk); - if (netif_running(ndev)) { - fec_restart(ndev, fep->full_duplex); - netif_device_attach(ndev); - } + clk_enable(fep->clk); + if (netif_running(ndev)) { + fec_restart(ndev, fep->full_duplex); + netif_device_attach(ndev); } + return 0; } diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 9c0b1bac6af6..7b92897ca66b 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -5744,7 +5744,7 @@ static void __devexit nv_remove(struct pci_dev *pci_dev) pci_set_drvdata(pci_dev, NULL); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int nv_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); @@ -5795,6 +5795,11 @@ static int nv_resume(struct device *device) static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume); #define NV_PM_OPS (&nv_pm_ops) +#else +#define NV_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + 
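[Editorial aside] The forcedeth hunk above splits the power-management guards: the dev_pm_ops block now lives under CONFIG_PM_SLEEP, while the shutdown hook stays under CONFIG_PM. A minimal sketch of that idiom follows, with hypothetical foo_* stubs standing in for a real driver's handlers; none of this code is taken from the patch itself.

/*
 * Illustrative sketch only, not part of this commit.  foo_suspend,
 * foo_resume and foo_shutdown are made-up stand-ins for a driver's
 * real handlers; the guard structure mirrors the forcedeth change.
 */
#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev) { return 0; }	/* stub */
static int foo_resume(struct device *dev)  { return 0; }	/* stub */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS	(&foo_pm_ops)
#else
#define FOO_PM_OPS	NULL	/* no dev_pm_ops when sleep support is off */
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static void foo_shutdown(struct pci_dev *pdev) { }	/* stub */
#else
#define foo_shutdown	NULL	/* shutdown hook only exists under CONFIG_PM */
#endif /* CONFIG_PM */

static struct pci_driver foo_driver = {
	.name		= "foo",
	.shutdown	= foo_shutdown,
	.driver.pm	= FOO_PM_OPS,
};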
+#ifdef CONFIG_PM static void nv_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -5822,7 +5827,6 @@ static void nv_shutdown(struct pci_dev *pdev) } } #else -#define NV_PM_OPS NULL #define nv_shutdown NULL #endif /* CONFIG_PM */ diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c new file mode 100644 index 000000000000..1d6f4b8d393a --- /dev/null +++ b/drivers/net/ftmac100.c @@ -0,0 +1,1198 @@ +/* + * Faraday FTMAC100 10/100 Ethernet + * + * (C) Copyright 2009-2011 Faraday Technology + * Po-Yu Chuang <ratbert@faraday-tech.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> + +#include "ftmac100.h" + +#define DRV_NAME "ftmac100" +#define DRV_VERSION "0.2" + +#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */ +#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */ + +#define MAX_PKT_SIZE 1518 +#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */ + +#if MAX_PKT_SIZE > 0x7ff +#error invalid MAX_PKT_SIZE +#endif + +#if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE +#error invalid RX_BUF_SIZE +#endif + +/****************************************************************************** + * private data + *****************************************************************************/ +struct ftmac100_descs { + struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES]; + struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES]; +}; + +struct ftmac100 { + struct resource *res; + void __iomem *base; + int irq; + + struct ftmac100_descs *descs; + dma_addr_t descs_dma_addr; + + unsigned int rx_pointer; + unsigned int tx_clean_pointer; + unsigned int tx_pointer; + unsigned int tx_pending; + + spinlock_t tx_lock; + + struct net_device *netdev; + struct device *dev; + struct napi_struct napi; + + struct mii_if_info mii; +}; + +static int ftmac100_alloc_rx_page(struct ftmac100 *priv, + struct ftmac100_rxdes *rxdes, gfp_t gfp); + +/****************************************************************************** + * internal functions (hardware register access) + *****************************************************************************/ +#define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \ + FTMAC100_INT_NORXBUF | \ + FTMAC100_INT_XPKT_OK | \ + FTMAC100_INT_XPKT_LOST | \ + FTMAC100_INT_RPKT_LOST | \ + FTMAC100_INT_AHB_ERR | \ + FTMAC100_INT_PHYSTS_CHG) + +#define INT_MASK_ALL_DISABLED 0 + +static void ftmac100_enable_all_int(struct ftmac100 *priv) +{ + iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR); +} + +static void ftmac100_disable_all_int(struct ftmac100 *priv) +{ + 
iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR); +} + +static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr) +{ + iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR); +} + +static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr) +{ + iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR); +} + +static void ftmac100_txdma_start_polling(struct ftmac100 *priv) +{ + iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD); +} + +static int ftmac100_reset(struct ftmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + int i; + + /* NOTE: reset clears all registers */ + iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR); + + for (i = 0; i < 5; i++) { + unsigned int maccr; + + maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR); + if (!(maccr & FTMAC100_MACCR_SW_RST)) { + /* + * FTMAC100_MACCR_SW_RST cleared does not indicate + * that hardware reset completed (what the f*ck). + * We still need to wait for a while. + */ + usleep_range(500, 1000); + return 0; + } + + usleep_range(1000, 10000); + } + + netdev_err(netdev, "software reset failed\n"); + return -EIO; +} + +static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac) +{ + unsigned int maddr = mac[0] << 8 | mac[1]; + unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; + + iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR); + iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR); +} + +#define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \ + FTMAC100_MACCR_RCV_EN | \ + FTMAC100_MACCR_XDMA_EN | \ + FTMAC100_MACCR_RDMA_EN | \ + FTMAC100_MACCR_CRC_APD | \ + FTMAC100_MACCR_FULLDUP | \ + FTMAC100_MACCR_RX_RUNT | \ + FTMAC100_MACCR_RX_BROADPKT) + +static int ftmac100_start_hw(struct ftmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + + if (ftmac100_reset(priv)) + return -EIO; + + /* setup ring buffer base registers */ + ftmac100_set_rx_ring_base(priv, + priv->descs_dma_addr + + offsetof(struct ftmac100_descs, rxdes)); + ftmac100_set_tx_ring_base(priv, + priv->descs_dma_addr + + offsetof(struct ftmac100_descs, txdes)); + + iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC); + + ftmac100_set_mac(priv, netdev->dev_addr); + + iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR); + return 0; +} + +static void ftmac100_stop_hw(struct ftmac100 *priv) +{ + iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR); +} + +/****************************************************************************** + * internal functions (receive descriptor) + *****************************************************************************/ +static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS); +} + +static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS); +} + +static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); +} + +static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes) +{ + /* clear status bits */ + rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); +} + +static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR); +} + +static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR); +} + +static bool 
ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL); +} + +static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT); +} + +static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB); +} + +static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes) +{ + return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL; +} + +static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes) +{ + return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST); +} + +static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes, + unsigned int size) +{ + rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR); + rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size)); +} + +static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes) +{ + rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR); +} + +static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes, + dma_addr_t addr) +{ + rxdes->rxdes2 = cpu_to_le32(addr); +} + +static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes) +{ + return le32_to_cpu(rxdes->rxdes2); +} + +/* + * rxdes3 is not used by hardware. We use it to keep track of page. + * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). + */ +static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page) +{ + rxdes->rxdes3 = (unsigned int)page; +} + +static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes) +{ + return (struct page *)rxdes->rxdes3; +} + +/****************************************************************************** + * internal functions (receive) + *****************************************************************************/ +static int ftmac100_next_rx_pointer(int pointer) +{ + return (pointer + 1) & (RX_QUEUE_ENTRIES - 1); +} + +static void ftmac100_rx_pointer_advance(struct ftmac100 *priv) +{ + priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer); +} + +static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv) +{ + return &priv->descs->rxdes[priv->rx_pointer]; +} + +static struct ftmac100_rxdes * +ftmac100_rx_locate_first_segment(struct ftmac100 *priv) +{ + struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); + + while (!ftmac100_rxdes_owned_by_dma(rxdes)) { + if (ftmac100_rxdes_first_segment(rxdes)) + return rxdes; + + ftmac100_rxdes_set_dma_own(rxdes); + ftmac100_rx_pointer_advance(priv); + rxdes = ftmac100_current_rxdes(priv); + } + + return NULL; +} + +static bool ftmac100_rx_packet_error(struct ftmac100 *priv, + struct ftmac100_rxdes *rxdes) +{ + struct net_device *netdev = priv->netdev; + bool error = false; + + if (unlikely(ftmac100_rxdes_rx_error(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx err\n"); + + netdev->stats.rx_errors++; + error = true; + } + + if (unlikely(ftmac100_rxdes_crc_error(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx crc err\n"); + + netdev->stats.rx_crc_errors++; + error = true; + } + + if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx frame too long\n"); + + netdev->stats.rx_length_errors++; + error = true; + } else if (unlikely(ftmac100_rxdes_runt(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx runt\n"); + + netdev->stats.rx_length_errors++; + error = true; + } else if 
(unlikely(ftmac100_rxdes_odd_nibble(rxdes))) { + if (net_ratelimit()) + netdev_info(netdev, "rx odd nibble\n"); + + netdev->stats.rx_length_errors++; + error = true; + } + + return error; +} + +static void ftmac100_rx_drop_packet(struct ftmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); + bool done = false; + + if (net_ratelimit()) + netdev_dbg(netdev, "drop packet %p\n", rxdes); + + do { + if (ftmac100_rxdes_last_segment(rxdes)) + done = true; + + ftmac100_rxdes_set_dma_own(rxdes); + ftmac100_rx_pointer_advance(priv); + rxdes = ftmac100_current_rxdes(priv); + } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes)); + + netdev->stats.rx_dropped++; +} + +static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed) +{ + struct net_device *netdev = priv->netdev; + struct ftmac100_rxdes *rxdes; + struct sk_buff *skb; + struct page *page; + dma_addr_t map; + int length; + + rxdes = ftmac100_rx_locate_first_segment(priv); + if (!rxdes) + return false; + + if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) { + ftmac100_rx_drop_packet(priv); + return true; + } + + /* + * It is impossible to get multi-segment packets + * because we always provide big enough receive buffers. + */ + if (unlikely(!ftmac100_rxdes_last_segment(rxdes))) + BUG(); + + /* start processing */ + skb = netdev_alloc_skb_ip_align(netdev, 128); + if (unlikely(!skb)) { + if (net_ratelimit()) + netdev_err(netdev, "rx skb alloc failed\n"); + + ftmac100_rx_drop_packet(priv); + return true; + } + + if (unlikely(ftmac100_rxdes_multicast(rxdes))) + netdev->stats.multicast++; + + map = ftmac100_rxdes_get_dma_addr(rxdes); + dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); + + length = ftmac100_rxdes_frame_length(rxdes); + page = ftmac100_rxdes_get_page(rxdes); + skb_fill_page_desc(skb, 0, page, 0, length); + skb->len += length; + skb->data_len += length; + skb->truesize += length; + __pskb_pull_tail(skb, min(length, 64)); + + ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); + + ftmac100_rx_pointer_advance(priv); + + skb->protocol = eth_type_trans(skb, netdev); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + + /* push packet to protocol stack */ + netif_receive_skb(skb); + + (*processed)++; + return true; +} + +/****************************************************************************** + * internal functions (transmit descriptor) + *****************************************************************************/ +static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes) +{ + /* clear all except end of ring bit */ + txdes->txdes0 = 0; + txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR); + txdes->txdes2 = 0; + txdes->txdes3 = 0; +} + +static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes) +{ + return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); +} + +static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes) +{ + /* + * Make sure dma own bit will not be set before any other + * descriptor fields. 
+ */ + wmb(); + txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); +} + +static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes) +{ + return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL); +} + +static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes) +{ + return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL); +} + +static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR); +} + +static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS); +} + +static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS); +} + +static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC); +} + +static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes, + unsigned int len) +{ + txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len)); +} + +static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes, + dma_addr_t addr) +{ + txdes->txdes2 = cpu_to_le32(addr); +} + +static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes) +{ + return le32_to_cpu(txdes->txdes2); +} + +/* + * txdes3 is not used by hardware. We use it to keep track of socket buffer. + * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). + */ +static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb) +{ + txdes->txdes3 = (unsigned int)skb; +} + +static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes) +{ + return (struct sk_buff *)txdes->txdes3; +} + +/****************************************************************************** + * internal functions (transmit) + *****************************************************************************/ +static int ftmac100_next_tx_pointer(int pointer) +{ + return (pointer + 1) & (TX_QUEUE_ENTRIES - 1); +} + +static void ftmac100_tx_pointer_advance(struct ftmac100 *priv) +{ + priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer); +} + +static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv) +{ + priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer); +} + +static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv) +{ + return &priv->descs->txdes[priv->tx_pointer]; +} + +static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv) +{ + return &priv->descs->txdes[priv->tx_clean_pointer]; +} + +static bool ftmac100_tx_complete_packet(struct ftmac100 *priv) +{ + struct net_device *netdev = priv->netdev; + struct ftmac100_txdes *txdes; + struct sk_buff *skb; + dma_addr_t map; + + if (priv->tx_pending == 0) + return false; + + txdes = ftmac100_current_clean_txdes(priv); + + if (ftmac100_txdes_owned_by_dma(txdes)) + return false; + + skb = ftmac100_txdes_get_skb(txdes); + map = ftmac100_txdes_get_dma_addr(txdes); + + if (unlikely(ftmac100_txdes_excessive_collision(txdes) || + ftmac100_txdes_late_collision(txdes))) { + /* + * packet transmitted to ethernet lost due to late collision + * or excessive collision + */ + netdev->stats.tx_aborted_errors++; + } else { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + } + + dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); + dev_kfree_skb(skb); + + ftmac100_txdes_reset(txdes); + + 
ftmac100_tx_clean_pointer_advance(priv); + + spin_lock(&priv->tx_lock); + priv->tx_pending--; + spin_unlock(&priv->tx_lock); + netif_wake_queue(netdev); + + return true; +} + +static void ftmac100_tx_complete(struct ftmac100 *priv) +{ + while (ftmac100_tx_complete_packet(priv)) + ; +} + +static int ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, + dma_addr_t map) +{ + struct net_device *netdev = priv->netdev; + struct ftmac100_txdes *txdes; + unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; + + txdes = ftmac100_current_txdes(priv); + ftmac100_tx_pointer_advance(priv); + + /* setup TX descriptor */ + ftmac100_txdes_set_skb(txdes, skb); + ftmac100_txdes_set_dma_addr(txdes, map); + + ftmac100_txdes_set_first_segment(txdes); + ftmac100_txdes_set_last_segment(txdes); + ftmac100_txdes_set_txint(txdes); + ftmac100_txdes_set_buffer_size(txdes, len); + + spin_lock(&priv->tx_lock); + priv->tx_pending++; + if (priv->tx_pending == TX_QUEUE_ENTRIES) + netif_stop_queue(netdev); + + /* start transmit */ + ftmac100_txdes_set_dma_own(txdes); + spin_unlock(&priv->tx_lock); + + ftmac100_txdma_start_polling(priv); + return NETDEV_TX_OK; +} + +/****************************************************************************** + * internal functions (buffer) + *****************************************************************************/ +static int ftmac100_alloc_rx_page(struct ftmac100 *priv, + struct ftmac100_rxdes *rxdes, gfp_t gfp) +{ + struct net_device *netdev = priv->netdev; + struct page *page; + dma_addr_t map; + + page = alloc_page(gfp); + if (!page) { + if (net_ratelimit()) + netdev_err(netdev, "failed to allocate rx page\n"); + return -ENOMEM; + } + + map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, map))) { + if (net_ratelimit()) + netdev_err(netdev, "failed to map rx page\n"); + __free_page(page); + return -ENOMEM; + } + + ftmac100_rxdes_set_page(rxdes, page); + ftmac100_rxdes_set_dma_addr(rxdes, map); + ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE); + ftmac100_rxdes_set_dma_own(rxdes); + return 0; +} + +static void ftmac100_free_buffers(struct ftmac100 *priv) +{ + int i; + + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { + struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; + struct page *page = ftmac100_rxdes_get_page(rxdes); + dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes); + + if (!page) + continue; + + dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); + __free_page(page); + } + + for (i = 0; i < TX_QUEUE_ENTRIES; i++) { + struct ftmac100_txdes *txdes = &priv->descs->txdes[i]; + struct sk_buff *skb = ftmac100_txdes_get_skb(txdes); + dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes); + + if (!skb) + continue; + + dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); + dev_kfree_skb(skb); + } + + dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs), + priv->descs, priv->descs_dma_addr); +} + +static int ftmac100_alloc_buffers(struct ftmac100 *priv) +{ + int i; + + priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs), + &priv->descs_dma_addr, GFP_KERNEL); + if (!priv->descs) + return -ENOMEM; + + memset(priv->descs, 0, sizeof(struct ftmac100_descs)); + + /* initialize RX ring */ + ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); + + for (i = 0; i < RX_QUEUE_ENTRIES; i++) { + struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; + + if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL)) + goto err; + } + + /* initialize TX ring 
*/ + ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]); + return 0; + +err: + ftmac100_free_buffers(priv); + return -ENOMEM; +} + +/****************************************************************************** + * struct mii_if_info functions + *****************************************************************************/ +static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg) +{ + struct ftmac100 *priv = netdev_priv(netdev); + unsigned int phycr; + int i; + + phycr = FTMAC100_PHYCR_PHYAD(phy_id) | + FTMAC100_PHYCR_REGAD(reg) | + FTMAC100_PHYCR_MIIRD; + + iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); + + for (i = 0; i < 10; i++) { + phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); + + if ((phycr & FTMAC100_PHYCR_MIIRD) == 0) + return phycr & FTMAC100_PHYCR_MIIRDATA; + + usleep_range(100, 1000); + } + + netdev_err(netdev, "mdio read timed out\n"); + return 0; +} + +static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg, + int data) +{ + struct ftmac100 *priv = netdev_priv(netdev); + unsigned int phycr; + int i; + + phycr = FTMAC100_PHYCR_PHYAD(phy_id) | + FTMAC100_PHYCR_REGAD(reg) | + FTMAC100_PHYCR_MIIWR; + + data = FTMAC100_PHYWDATA_MIIWDATA(data); + + iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA); + iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); + + for (i = 0; i < 10; i++) { + phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); + + if ((phycr & FTMAC100_PHYCR_MIIWR) == 0) + return; + + usleep_range(100, 1000); + } + + netdev_err(netdev, "mdio write timed out\n"); +} + +/****************************************************************************** + * struct ethtool_ops functions + *****************************************************************************/ +static void ftmac100_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_NAME); + strcpy(info->version, DRV_VERSION); + strcpy(info->bus_info, dev_name(&netdev->dev)); +} + +static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct ftmac100 *priv = netdev_priv(netdev); + return mii_ethtool_gset(&priv->mii, cmd); +} + +static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) +{ + struct ftmac100 *priv = netdev_priv(netdev); + return mii_ethtool_sset(&priv->mii, cmd); +} + +static int ftmac100_nway_reset(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + return mii_nway_restart(&priv->mii); +} + +static u32 ftmac100_get_link(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + return mii_link_ok(&priv->mii); +} + +static const struct ethtool_ops ftmac100_ethtool_ops = { + .set_settings = ftmac100_set_settings, + .get_settings = ftmac100_get_settings, + .get_drvinfo = ftmac100_get_drvinfo, + .nway_reset = ftmac100_nway_reset, + .get_link = ftmac100_get_link, +}; + +/****************************************************************************** + * interrupt handler + *****************************************************************************/ +static irqreturn_t ftmac100_interrupt(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct ftmac100 *priv = netdev_priv(netdev); + + if (likely(netif_running(netdev))) { + /* Disable interrupts for polling */ + ftmac100_disable_all_int(priv); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +/****************************************************************************** + * struct 
napi_struct functions + *****************************************************************************/ +static int ftmac100_poll(struct napi_struct *napi, int budget) +{ + struct ftmac100 *priv = container_of(napi, struct ftmac100, napi); + struct net_device *netdev = priv->netdev; + unsigned int status; + bool completed = true; + int rx = 0; + + status = ioread32(priv->base + FTMAC100_OFFSET_ISR); + + if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) { + /* + * FTMAC100_INT_RPKT_FINISH: + * RX DMA has received packets into RX buffer successfully + * + * FTMAC100_INT_NORXBUF: + * RX buffer unavailable + */ + bool retry; + + do { + retry = ftmac100_rx_packet(priv, &rx); + } while (retry && rx < budget); + + if (retry && rx == budget) + completed = false; + } + + if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) { + /* + * FTMAC100_INT_XPKT_OK: + * packet transmitted to ethernet successfully + * + * FTMAC100_INT_XPKT_LOST: + * packet transmitted to ethernet lost due to late + * collision or excessive collision + */ + ftmac100_tx_complete(priv); + } + + if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST | + FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) { + if (net_ratelimit()) + netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status, + status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "", + status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "", + status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "", + status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : ""); + + if (status & FTMAC100_INT_NORXBUF) { + /* RX buffer unavailable */ + netdev->stats.rx_over_errors++; + } + + if (status & FTMAC100_INT_RPKT_LOST) { + /* received packet lost due to RX FIFO full */ + netdev->stats.rx_fifo_errors++; + } + + if (status & FTMAC100_INT_PHYSTS_CHG) { + /* PHY link status change */ + mii_check_link(&priv->mii); + } + } + + if (completed) { + /* stop polling */ + napi_complete(napi); + ftmac100_enable_all_int(priv); + } + + return rx; +} + +/****************************************************************************** + * struct net_device_ops functions + *****************************************************************************/ +static int ftmac100_open(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + int err; + + err = ftmac100_alloc_buffers(priv); + if (err) { + netdev_err(netdev, "failed to allocate buffers\n"); + goto err_alloc; + } + + err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev); + if (err) { + netdev_err(netdev, "failed to request irq %d\n", priv->irq); + goto err_irq; + } + + priv->rx_pointer = 0; + priv->tx_clean_pointer = 0; + priv->tx_pointer = 0; + priv->tx_pending = 0; + + err = ftmac100_start_hw(priv); + if (err) + goto err_hw; + + napi_enable(&priv->napi); + netif_start_queue(netdev); + + ftmac100_enable_all_int(priv); + + return 0; + +err_hw: + free_irq(priv->irq, netdev); +err_irq: + ftmac100_free_buffers(priv); +err_alloc: + return err; +} + +static int ftmac100_stop(struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + + ftmac100_disable_all_int(priv); + netif_stop_queue(netdev); + napi_disable(&priv->napi); + ftmac100_stop_hw(priv); + free_irq(priv->irq, netdev); + ftmac100_free_buffers(priv); + + return 0; +} + +static int ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct ftmac100 *priv = netdev_priv(netdev); + dma_addr_t map; + + if (unlikely(skb->len > MAX_PKT_SIZE)) { + if (net_ratelimit()) + netdev_dbg(netdev, "tx packet too big\n"); + + 
netdev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, map))) { + /* drop packet */ + if (net_ratelimit()) + netdev_err(netdev, "map socket buffer failed\n"); + + netdev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + + return ftmac100_xmit(priv, skb, map); +} + +/* optional */ +static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct ftmac100 *priv = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + return generic_mii_ioctl(&priv->mii, data, cmd, NULL); +} + +static const struct net_device_ops ftmac100_netdev_ops = { + .ndo_open = ftmac100_open, + .ndo_stop = ftmac100_stop, + .ndo_start_xmit = ftmac100_hard_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = ftmac100_do_ioctl, +}; + +/****************************************************************************** + * struct platform_driver functions + *****************************************************************************/ +static int ftmac100_probe(struct platform_device *pdev) +{ + struct resource *res; + int irq; + struct net_device *netdev; + struct ftmac100 *priv; + int err; + + if (!pdev) + return -ENODEV; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENXIO; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + /* setup net_device */ + netdev = alloc_etherdev(sizeof(*priv)); + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops); + netdev->netdev_ops = &ftmac100_netdev_ops; + + platform_set_drvdata(pdev, netdev); + + /* setup private data */ + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->dev = &pdev->dev; + + spin_lock_init(&priv->tx_lock); + + /* initialize NAPI */ + netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64); + + /* map io memory */ + priv->res = request_mem_region(res->start, resource_size(res), + dev_name(&pdev->dev)); + if (!priv->res) { + dev_err(&pdev->dev, "Could not reserve memory region\n"); + err = -ENOMEM; + goto err_req_mem; + } + + priv->base = ioremap(res->start, res->end - res->start); + if (!priv->base) { + dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); + err = -EIO; + goto err_ioremap; + } + + priv->irq = irq; + + /* initialize struct mii_if_info */ + priv->mii.phy_id = 0; + priv->mii.phy_id_mask = 0x1f; + priv->mii.reg_num_mask = 0x1f; + priv->mii.dev = netdev; + priv->mii.mdio_read = ftmac100_mdio_read; + priv->mii.mdio_write = ftmac100_mdio_write; + + /* register network device */ + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto err_register_netdev; + } + + netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + random_ether_addr(netdev->dev_addr); + netdev_info(netdev, "generated random MAC address %pM\n", + netdev->dev_addr); + } + + return 0; + +err_register_netdev: + iounmap(priv->base); +err_ioremap: + release_resource(priv->res); +err_req_mem: + netif_napi_del(&priv->napi); + platform_set_drvdata(pdev, NULL); + free_netdev(netdev); +err_alloc_etherdev: + return err; +} + +static int __exit ftmac100_remove(struct platform_device *pdev) +{ + struct net_device *netdev; + struct ftmac100 *priv; + + netdev = 
platform_get_drvdata(pdev); + priv = netdev_priv(netdev); + + unregister_netdev(netdev); + + iounmap(priv->base); + release_resource(priv->res); + + netif_napi_del(&priv->napi); + platform_set_drvdata(pdev, NULL); + free_netdev(netdev); + return 0; +} + +static struct platform_driver ftmac100_driver = { + .probe = ftmac100_probe, + .remove = __exit_p(ftmac100_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +/****************************************************************************** + * initialization / finalization + *****************************************************************************/ +static int __init ftmac100_init(void) +{ + pr_info("Loading version " DRV_VERSION " ...\n"); + return platform_driver_register(&ftmac100_driver); +} + +static void __exit ftmac100_exit(void) +{ + platform_driver_unregister(&ftmac100_driver); +} + +module_init(ftmac100_init); +module_exit(ftmac100_exit); + +MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); +MODULE_DESCRIPTION("FTMAC100 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ftmac100.h b/drivers/net/ftmac100.h new file mode 100644 index 000000000000..46a0c47b1ee1 --- /dev/null +++ b/drivers/net/ftmac100.h @@ -0,0 +1,180 @@ +/* + * Faraday FTMAC100 10/100 Ethernet + * + * (C) Copyright 2009-2011 Faraday Technology + * Po-Yu Chuang <ratbert@faraday-tech.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __FTMAC100_H +#define __FTMAC100_H + +#define FTMAC100_OFFSET_ISR 0x00 +#define FTMAC100_OFFSET_IMR 0x04 +#define FTMAC100_OFFSET_MAC_MADR 0x08 +#define FTMAC100_OFFSET_MAC_LADR 0x0c +#define FTMAC100_OFFSET_MAHT0 0x10 +#define FTMAC100_OFFSET_MAHT1 0x14 +#define FTMAC100_OFFSET_TXPD 0x18 +#define FTMAC100_OFFSET_RXPD 0x1c +#define FTMAC100_OFFSET_TXR_BADR 0x20 +#define FTMAC100_OFFSET_RXR_BADR 0x24 +#define FTMAC100_OFFSET_ITC 0x28 +#define FTMAC100_OFFSET_APTC 0x2c +#define FTMAC100_OFFSET_DBLAC 0x30 +#define FTMAC100_OFFSET_MACCR 0x88 +#define FTMAC100_OFFSET_MACSR 0x8c +#define FTMAC100_OFFSET_PHYCR 0x90 +#define FTMAC100_OFFSET_PHYWDATA 0x94 +#define FTMAC100_OFFSET_FCR 0x98 +#define FTMAC100_OFFSET_BPR 0x9c +#define FTMAC100_OFFSET_TS 0xc4 +#define FTMAC100_OFFSET_DMAFIFOS 0xc8 +#define FTMAC100_OFFSET_TM 0xcc +#define FTMAC100_OFFSET_TX_MCOL_SCOL 0xd4 +#define FTMAC100_OFFSET_RPF_AEP 0xd8 +#define FTMAC100_OFFSET_XM_PG 0xdc +#define FTMAC100_OFFSET_RUNT_TLCC 0xe0 +#define FTMAC100_OFFSET_CRCER_FTL 0xe4 +#define FTMAC100_OFFSET_RLC_RCC 0xe8 +#define FTMAC100_OFFSET_BROC 0xec +#define FTMAC100_OFFSET_MULCA 0xf0 +#define FTMAC100_OFFSET_RP 0xf4 +#define FTMAC100_OFFSET_XP 0xf8 + +/* + * Interrupt status register & interrupt mask register + */ +#define FTMAC100_INT_RPKT_FINISH (1 << 0) +#define FTMAC100_INT_NORXBUF (1 << 1) +#define FTMAC100_INT_XPKT_FINISH (1 << 2) +#define FTMAC100_INT_NOTXBUF (1 << 3) +#define FTMAC100_INT_XPKT_OK (1 << 4) +#define FTMAC100_INT_XPKT_LOST (1 << 5) +#define FTMAC100_INT_RPKT_SAV (1 << 6) +#define FTMAC100_INT_RPKT_LOST (1 << 7) +#define FTMAC100_INT_AHB_ERR (1 << 8) +#define FTMAC100_INT_PHYSTS_CHG (1 << 9) + +/* + * Interrupt timer control register + */ +#define FTMAC100_ITC_RXINT_CNT(x) (((x) & 0xf) << 0) +#define FTMAC100_ITC_RXINT_THR(x) (((x) & 0x7) << 4) +#define FTMAC100_ITC_RXINT_TIME_SEL (1 << 7) +#define FTMAC100_ITC_TXINT_CNT(x) (((x) & 0xf) << 8) +#define FTMAC100_ITC_TXINT_THR(x) (((x) & 0x7) << 12) +#define FTMAC100_ITC_TXINT_TIME_SEL (1 << 15) + +/* + * Automatic polling timer control register + */ +#define FTMAC100_APTC_RXPOLL_CNT(x) (((x) & 0xf) << 0) +#define FTMAC100_APTC_RXPOLL_TIME_SEL (1 << 4) +#define FTMAC100_APTC_TXPOLL_CNT(x) (((x) & 0xf) << 8) +#define FTMAC100_APTC_TXPOLL_TIME_SEL (1 << 12) + +/* + * DMA burst length and arbitration control register + */ +#define FTMAC100_DBLAC_INCR4_EN (1 << 0) +#define FTMAC100_DBLAC_INCR8_EN (1 << 1) +#define FTMAC100_DBLAC_INCR16_EN (1 << 2) +#define FTMAC100_DBLAC_RXFIFO_LTHR(x) (((x) & 0x7) << 3) +#define FTMAC100_DBLAC_RXFIFO_HTHR(x) (((x) & 0x7) << 6) +#define FTMAC100_DBLAC_RX_THR_EN (1 << 9) + +/* + * MAC control register + */ +#define FTMAC100_MACCR_XDMA_EN (1 << 0) +#define FTMAC100_MACCR_RDMA_EN (1 << 1) +#define FTMAC100_MACCR_SW_RST (1 << 2) +#define FTMAC100_MACCR_LOOP_EN (1 << 3) +#define FTMAC100_MACCR_CRC_DIS (1 << 4) +#define FTMAC100_MACCR_XMT_EN (1 << 5) +#define FTMAC100_MACCR_ENRX_IN_HALFTX (1 << 6) +#define FTMAC100_MACCR_RCV_EN (1 << 8) +#define FTMAC100_MACCR_HT_MULTI_EN (1 << 9) +#define FTMAC100_MACCR_RX_RUNT (1 << 10) +#define FTMAC100_MACCR_RX_FTL (1 << 11) +#define FTMAC100_MACCR_RCV_ALL (1 << 12) +#define FTMAC100_MACCR_CRC_APD (1 << 14) +#define FTMAC100_MACCR_FULLDUP (1 << 15) +#define FTMAC100_MACCR_RX_MULTIPKT (1 << 16) +#define FTMAC100_MACCR_RX_BROADPKT (1 << 17) + +/* + * PHY control register + */ +#define FTMAC100_PHYCR_MIIRDATA 0xffff +#define FTMAC100_PHYCR_PHYAD(x) (((x) & 0x1f) << 16) +#define FTMAC100_PHYCR_REGAD(x) (((x) & 0x1f) 
<< 21) +#define FTMAC100_PHYCR_MIIRD (1 << 26) +#define FTMAC100_PHYCR_MIIWR (1 << 27) + +/* + * PHY write data register + */ +#define FTMAC100_PHYWDATA_MIIWDATA(x) ((x) & 0xffff) + +/* + * Transmit descriptor, aligned to 16 bytes + */ +struct ftmac100_txdes { + unsigned int txdes0; + unsigned int txdes1; + unsigned int txdes2; /* TXBUF_BADR */ + unsigned int txdes3; /* not used by HW */ +} __attribute__ ((aligned(16))); + +#define FTMAC100_TXDES0_TXPKT_LATECOL (1 << 0) +#define FTMAC100_TXDES0_TXPKT_EXSCOL (1 << 1) +#define FTMAC100_TXDES0_TXDMA_OWN (1 << 31) + +#define FTMAC100_TXDES1_TXBUF_SIZE(x) ((x) & 0x7ff) +#define FTMAC100_TXDES1_LTS (1 << 27) +#define FTMAC100_TXDES1_FTS (1 << 28) +#define FTMAC100_TXDES1_TX2FIC (1 << 29) +#define FTMAC100_TXDES1_TXIC (1 << 30) +#define FTMAC100_TXDES1_EDOTR (1 << 31) + +/* + * Receive descriptor, aligned to 16 bytes + */ +struct ftmac100_rxdes { + unsigned int rxdes0; + unsigned int rxdes1; + unsigned int rxdes2; /* RXBUF_BADR */ + unsigned int rxdes3; /* not used by HW */ +} __attribute__ ((aligned(16))); + +#define FTMAC100_RXDES0_RFL 0x7ff +#define FTMAC100_RXDES0_MULTICAST (1 << 16) +#define FTMAC100_RXDES0_BROADCAST (1 << 17) +#define FTMAC100_RXDES0_RX_ERR (1 << 18) +#define FTMAC100_RXDES0_CRC_ERR (1 << 19) +#define FTMAC100_RXDES0_FTL (1 << 20) +#define FTMAC100_RXDES0_RUNT (1 << 21) +#define FTMAC100_RXDES0_RX_ODD_NB (1 << 22) +#define FTMAC100_RXDES0_LRS (1 << 28) +#define FTMAC100_RXDES0_FRS (1 << 29) +#define FTMAC100_RXDES0_RXDMA_OWN (1 << 31) + +#define FTMAC100_RXDES1_RXBUF_SIZE(x) ((x) & 0x7ff) +#define FTMAC100_RXDES1_EDORR (1 << 31) + +#endif /* __FTMAC100_H */ diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index ac1d323c5eb5..8931168d3e74 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -400,13 +400,14 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos) static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct list_head *p; + struct bpqdev *bpqdev = v; ++*pos; if (v == SEQ_START_TOKEN) - p = rcu_dereference(bpq_devices.next); + p = rcu_dereference(list_next_rcu(&bpq_devices)); else - p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next); + p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list)); return (p == &bpq_devices) ? 
NULL : list_entry(p, struct bpqdev, bpq_list); diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 0a2368fa6bc6..6b256c275e10 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -64,7 +64,14 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *); static s32 igb_read_mac_addr_82575(struct e1000_hw *); static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); - +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; @@ -129,6 +136,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) break; case E1000_DEV_ID_82580_COPPER: case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_QUAD_FIBER: case E1000_DEV_ID_82580_SERDES: case E1000_DEV_ID_82580_SGMII: case E1000_DEV_ID_82580_COPPER_DUAL: @@ -194,7 +202,11 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) mac->arc_subsystem_valid = (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) ? true : false; - + /* enable EEE on i350 parts */ + if (mac->type == e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; /* physical interface link setup */ mac->ops.setup_physical_interface = (hw->phy.media_type == e1000_media_type_copper) @@ -232,14 +244,42 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ size += NVM_WORD_SIZE_BASE_SHIFT; - /* EEPROM access above 16k is unsupported */ - if (size > 14) - size = 14; nvm->word_size = 1 << size; + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; - /* if 82576 then initialize mailbox parameters */ - if (mac->type == e1000_82576) + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + nvm->ops.release = igb_release_nvm_82575; + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + } + nvm->ops.write = igb_write_nvm_spi; + + /* if part supports SR-IOV then initialize mailbox parameters */ + switch (mac->type) { + case e1000_82576: + case e1000_i350: igb_init_mbx_params_pf(hw); + break; + default: + break; + } /* setup PHY parameters */ if (phy->media_type != e1000_media_type_copper) { @@ -1747,6 +1787,248 @@ u16 igb_rxpbs_adjust_82580(u32 data) return ret_val; } +/** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
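+ * Put differently: the checksum word stored at offset + NVM_CHECKSUM_REG is
+ * expected to have been chosen so that the 16-bit sum of the whole region,
+ * checksum word included, wraps to NVM_SUM (0xBABA).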
+ **/
+s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+	s32 ret_val = 0;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		hw_dbg("NVM Checksum Invalid\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_update_nvm_checksum_with_offset - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ * @offset: offset in words of the checksum protected region
+ *
+ * Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ * up to the checksum. Then calculates the EEPROM checksum and writes the
+ * value to the EEPROM.
+ **/
+s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+	s32 ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error while updating checksum.\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+				&checksum);
+	if (ret_val)
+		hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 eeprom_regions_count = 1;
+	u16 j, nvm_data;
+	u16 nvm_offset;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+		/* if checksums compatibility bit is set, validate checksums
+		 * for all 4 ports. */
+		eeprom_regions_count = 4;
+	}
+
+	for (j = 0; j < eeprom_regions_count; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_validate_nvm_checksum_with_offset(hw,
+								nvm_offset);
+		if (ret_val != 0)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_update_nvm_checksum_82580 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
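+ * The compatibility bit in NVM_COMPATIBILITY_REG_3 is set first (if it is not
+ * already set) so that later validation knows to check every per-port region.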
+ **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum" + " compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum" + " compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. 
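+ * EEE advertisement is toggled through the 1G/100M AN bits in E1000_IPCNFG
+ * and the LPI enables in E1000_EEER; the registers are only written when the
+ * MAC is an i350 using the internal (GMII/copper) link mode.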
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 ipcnfg, eeer, ctrl_ext; + + ctrl_ext = rd32(E1000_CTRL_EXT); + if ((hw->mac.type != e1000_i350) || + (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer |= (E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); +out: + + return ret_val; +} + static struct e1000_mac_operations e1000_mac_ops_82575 = { .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index 1d01af2472e7..dd6df3498998 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -251,5 +251,6 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_set_eee_i350(struct e1000_hw *); #endif diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index 6319ed902bc0..6b80d40110ca 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -51,6 +51,7 @@ #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 #define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 #define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 #define E1000_CTRL_EXT_EIAME 0x01000000 #define E1000_CTRL_EXT_IRCA 0x00000001 /* Interrupt delay cancellation */ @@ -110,6 +111,7 @@ /* Management Control */ #define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ #define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OSBMC is Enabled or not */ /* Enable Neighbor Discovery Filtering */ #define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ #define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ @@ -286,7 +288,34 @@ #define E1000_TCTL_COLD 0x003ff000 /* collision distance */ #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ -/* Transmit Arbitration Count */ +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing + * Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive + * Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe + * transactions */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit + * Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate + * Threshold */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in + * current window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic + * Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold + * High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define 
E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 @@ -565,6 +594,8 @@ #define NVM_INIT_CONTROL3_PORT_A 0x0024 #define NVM_ALT_MAC_ADDR_PTR 0x0037 #define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 #define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ #define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ @@ -599,6 +630,7 @@ /* NVM Commands - SPI */ #define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ #define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ #define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ #define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ #define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ @@ -757,6 +789,17 @@ #define E1000_MDIC_ERROR 0x40000000 #define E1000_MDIC_DEST 0x80000000 +/* Thermal Sensor */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ + /* SerDes Control */ #define E1000_GEN_CTL_READY 0x80000000 #define E1000_GEN_CTL_ADDRESS_SHIFT 8 @@ -770,4 +813,11 @@ #define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based on DMA coal */ +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + #endif diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index e2638afb8cdc..27153e8d7b16 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h @@ -54,6 +54,7 @@ struct e1000_hw; #define E1000_DEV_ID_82580_SERDES 0x1510 #define E1000_DEV_ID_82580_SGMII 0x1511 #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 #define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 #define E1000_DEV_ID_DH89XXCC_SERDES 0x043A #define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C @@ -247,6 +248,10 @@ struct e1000_hw_stats { u64 scvpc; u64 hrmpc; u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; }; struct e1000_phy_stats { @@ -331,6 +336,8 @@ struct e1000_nvm_operations { s32 (*read)(struct e1000_hw *, u16, u16, u16 *); void (*release)(struct e1000_hw *); s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); }; struct e1000_info { @@ -417,7 +424,6 @@ struct e1000_phy_info { struct e1000_nvm_info { struct e1000_nvm_operations ops; - enum e1000_nvm_type type; enum e1000_nvm_override override; @@ -483,6 +489,7 @@ struct e1000_mbx_info { struct e1000_dev_spec_82575 { bool sgmii_active; bool global_device_reset; + bool eee_disable; }; struct e1000_hw { diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c index c474cdb70047..78d48c7fa859 100644 --- a/drivers/net/igb/e1000_mbx.c +++ b/drivers/net/igb/e1000_mbx.c @@ -422,26 +422,24 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw) { 
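+	/* These PF mailbox ops apply to both 82576 and i350; the 82575
+	 * invariants code only calls this for SR-IOV capable MACs. */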
struct e1000_mbx_info *mbx = &hw->mbx; - if (hw->mac.type == e1000_82576) { - mbx->timeout = 0; - mbx->usec_delay = 0; - - mbx->size = E1000_VFMAILBOX_SIZE; - - mbx->ops.read = igb_read_mbx_pf; - mbx->ops.write = igb_write_mbx_pf; - mbx->ops.read_posted = igb_read_posted_mbx; - mbx->ops.write_posted = igb_write_posted_mbx; - mbx->ops.check_for_msg = igb_check_for_msg_pf; - mbx->ops.check_for_ack = igb_check_for_ack_pf; - mbx->ops.check_for_rst = igb_check_for_rst_pf; - - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; - } + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = igb_read_mbx_pf; + mbx->ops.write = igb_write_mbx_pf; + mbx->ops.read_posted = igb_read_posted_mbx; + mbx->ops.write_posted = igb_write_posted_mbx; + mbx->ops.check_for_msg = igb_check_for_msg_pf; + mbx->ops.check_for_ack = igb_check_for_ack_pf; + mbx->ops.check_for_rst = igb_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; return 0; } diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c index 6b5cc2cc453d..75bf36a4baee 100644 --- a/drivers/net/igb/e1000_nvm.c +++ b/drivers/net/igb/e1000_nvm.c @@ -318,6 +318,68 @@ out: } /** + * igb_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* + * Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. 
This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = igb_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** * igb_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read @@ -353,7 +415,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) break; data[i] = (rd32(E1000_EERD) >> - E1000_NVM_RW_REG_DATA); + E1000_NVM_RW_REG_DATA); } out: diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h index 29c956a84bd0..7f43564c4bcc 100644 --- a/drivers/net/igb/e1000_nvm.h +++ b/drivers/net/igb/e1000_nvm.h @@ -35,6 +35,7 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size); s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 igb_validate_nvm_checksum(struct e1000_hw *hw); s32 igb_update_nvm_checksum(struct e1000_hw *hw); diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index 8ac83c5190d5..958ca3bda482 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h @@ -106,6 +106,19 @@ #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* TX Rate Limit Registers */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ + /* Split and Replication RX Control - RW */ #define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ /* @@ -324,4 +337,18 @@ /* DMA Coalescing registers */ #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ + +/* Thermal Sensor Register */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + #endif diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 92a4ef09e55c..1c687e298d5e 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -77,6 +77,7 @@ struct vf_data_storage { unsigned long last_nack; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 pf_qos; + u16 tx_rate; }; #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ @@ -323,6 +324,7 @@ struct igb_adapter { u16 rx_ring_count; unsigned int vfs_allocated_count; struct vf_data_storage *vf_data; + int vf_rate_link_speed; u32 rss_queues; u32 wvbr; }; @@ -331,6 +333,12 @@ struct igb_adapter { #define IGB_FLAG_DCA_ENABLED (1 << 1) #define IGB_FLAG_QUAD_PORT_A (1 << 2) #define IGB_FLAG_QUEUE_PAIRS (1 << 3) +#define IGB_FLAG_DMAC (1 << 4) + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ #define IGB_82576_TSYNC_SHIFT 19 #define IGB_82580_TSYNC_SHIFT 24 diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index a70e16bcfa7e..d976733bbcc2 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -86,6 +86,10 @@ static const struct igb_stats igb_gstrings_stats[] = { IGB_STAT("tx_smbus", stats.mgptc), IGB_STAT("rx_smbus", stats.mgprc), IGB_STAT("dropped_smbus", stats.mgpdc), + IGB_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + IGB_STAT("os2bmc_tx_by_host", stats.o2bspc), + IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc), }; #define IGB_NETDEV_STAT(_net_stat) { \ @@ -603,7 +607,10 @@ static void igb_get_regs(struct net_device *netdev, regs_buff[548] = rd32(E1000_TDFT); regs_buff[549] = rd32(E1000_TDFHS); regs_buff[550] = rd32(E1000_TDFPC); - + regs_buff[551] = adapter->stats.o2bgptc; + regs_buff[552] = adapter->stats.b2ospc; + regs_buff[553] = adapter->stats.o2bspc; + regs_buff[554] = adapter->stats.b2ogprc; } static int igb_get_eeprom_len(struct net_device *netdev) @@ -714,7 +721,7 @@ static int igb_set_eeprom(struct net_device *netdev, /* Update the checksum over the first part of the EEPROM if needed * and flush shadow RAM for 82573 controllers */ if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) - igb_update_nvm_checksum(hw); + hw->nvm.ops.update(hw); kfree(eeprom_buff); return ret_val; @@ -727,8 +734,9 @@ static void igb_get_drvinfo(struct net_device *netdev, char firmware_version[32]; u16 eeprom_data; - strncpy(drvinfo->driver, igb_driver_name, 32); - strncpy(drvinfo->version, igb_driver_version, 32); + strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1); + strncpy(drvinfo->version, igb_driver_version, + sizeof(drvinfo->version) - 1); /* EEPROM image version # is reported as firmware version # for * 82575 controllers */ @@ -738,8 +746,10 @@ static void igb_get_drvinfo(struct net_device *netdev, (eeprom_data & 0x0FF0) >> 4, eeprom_data & 0x000F); - strncpy(drvinfo->fw_version, firmware_version, 32); - strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + strncpy(drvinfo->fw_version, firmware_version, + sizeof(drvinfo->fw_version) - 1); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info) - 1); drvinfo->n_stats = IGB_STATS_LEN; drvinfo->testinfo_len = IGB_TEST_LEN; drvinfo->regdump_len = igb_get_regs_len(netdev); @@ -1070,7 +1080,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { wr32(reg, (_test[pat] & write)); - val = rd32(reg); + val = rd32(reg) & mask; if (val != (_test[pat] & write & mask)) { dev_err(&adapter->pdev->dev, "pattern test reg %04X " "failed: got 0x%08X expected 0x%08X\n", @@ -1999,6 +2009,12 @@ static int igb_set_coalesce(struct net_device *netdev, if ((adapter->flags & 
IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) return -EINVAL; + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; + } + /* convert to rate of irq's per second */ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) adapter->rx_itr_setting = ec->rx_coalesce_usecs; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 58c665b7513d..3d850af0cdda 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -50,12 +50,17 @@ #endif #include "igb.h" -#define DRV_VERSION "2.1.0-k2" +#define MAJ 3 +#define MIN 0 +#define BUILD 6 +#define KFIX 2 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ +__stringify(BUILD) "-k" __stringify(KFIX) char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; -static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation."; +static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation."; static const struct e1000_info *igb_info_tbl[] = { [board_82575] = &e1000_82575_info, @@ -68,6 +73,7 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, @@ -100,6 +106,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *); static void igb_setup_mrqc(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void __devexit igb_remove(struct pci_dev *pdev); +static void igb_init_hw_timer(struct igb_adapter *adapter); static int igb_sw_init(struct igb_adapter *); static int igb_open(struct net_device *); static int igb_close(struct net_device *); @@ -149,6 +156,7 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +static void igb_check_vf_rate_limit(struct igb_adapter *); #ifdef CONFIG_PM static int igb_suspend(struct pci_dev *, pm_message_t); @@ -1672,7 +1680,58 @@ void igb_reset(struct igb_adapter *adapter) if (hw->mac.ops.init_hw(hw)) dev_err(&pdev->dev, "Hardware Error\n"); + if (hw->mac.type > e1000_82580) { + if (adapter->flags & IGB_FLAG_DMAC) { + u32 reg; + /* + * DMA Coalescing high water mark needs to be higher + * than * the * Rx threshold. The Rx threshold is + * currently * pba - 6, so we * should use a high water + * mark of pba * - 4. 
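+ * In short: the Rx threshold below is derived from pba - 6, so the
+ * DMA coalescing high water mark is derived from pba - 4.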
*/ + hwm = (pba - 4) << 10; + + reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* watchdog timer= +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + wr32(E1000_DMACR, reg); + + /* no lower threshold to disable coalescing(smart fifb) + * -UTRESH=0*/ + wr32(E1000_DMCRTRH, 0); + + /* set hwm to PBA - 2 * max frame size */ + wr32(E1000_FCRTC, hwm); + + /* + * This sets the time to wait before requesting tran- + * sition to * low power state to number of usecs needed + * to receive 1 512 * byte frame at gigabit line rate + */ + reg = rd32(E1000_DMCTLX); + reg |= IGB_DMCTLX_DCFLUSH_DIS; + + /* Delay 255 usec before entering Lx state. */ + reg |= 0xFF; + wr32(E1000_DMCTLX, reg); + + /* free space in Tx packet buffer to wake from DMAC */ + wr32(E1000_DMCTXTH, + (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) + >> 6); + + /* make low power state decision controlled by DMAC */ + reg = rd32(E1000_PCIEMISC); + reg |= E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* end if IGB_FLAG_DMAC set */ + } if (hw->mac.type == e1000_82580) { u32 reg = rd32(E1000_PCIEMISC); wr32(E1000_PCIEMISC, @@ -1882,7 +1941,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, hw->mac.ops.reset_hw(hw); /* make sure the NVM is good */ - if (igb_validate_nvm_checksum(hw) < 0) { + if (hw->nvm.ops.validate(hw) < 0) { dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; @@ -1990,6 +2049,9 @@ static int __devinit igb_probe(struct pci_dev *pdev, } #endif + /* do hw tstamp init after resetting */ + igb_init_hw_timer(adapter); + dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", @@ -2012,7 +2074,13 @@ static int __devinit igb_probe(struct pci_dev *pdev, adapter->msix_entries ? "MSI-X" : (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", adapter->num_rx_queues, adapter->num_tx_queues); - + switch (hw->mac.type) { + case e1000_i350: + igb_set_eee_i350(hw); + break; + default: + break; + } return 0; err_register: @@ -2149,6 +2217,9 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) random_ether_addr(mac_addr); igb_set_vf_mac(adapter, i, mac_addr); } + /* DMA Coalescing is not supported in IOV mode. */ + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; } #endif /* CONFIG_PCI_IOV */ } @@ -2286,9 +2357,19 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV - if (hw->mac.type == e1000_82576) - adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs; - + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + if (max_vfs > 7) { + dev_warn(&pdev->dev, + "Maximum of 7 VFs per PF, using max\n"); + adapter->vfs_allocated_count = 7; + } else + adapter->vfs_allocated_count = max_vfs; + break; + default: + break; + } #endif /* CONFIG_PCI_IOV */ adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); @@ -2307,12 +2388,14 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) return -ENOMEM; } - igb_init_hw_timer(adapter); igb_probe_vfs(adapter); /* Explicitly disable IRQ since the NIC can be in any state. 
*/ igb_irq_disable(adapter); + if (hw->mac.type == e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + set_bit(__IGB_DOWN, &adapter->state); return 0; } @@ -3467,7 +3550,7 @@ static void igb_watchdog_task(struct work_struct *work) watchdog_task); struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; - u32 link; + u32 link, ctrl_ext, thstat; int i; link = igb_has_link(adapter); @@ -3491,6 +3574,25 @@ static void igb_watchdog_task(struct work_struct *work) ((ctrl & E1000_CTRL_RFCE) ? "RX" : ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None"))); + /* check for thermal sensor event on i350, + * copper only */ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + if ((hw->phy.media_type == + e1000_media_type_copper) && !(ctrl_ext & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + if (thstat & + E1000_THSTAT_LINK_THROTTLE) { + printk(KERN_INFO "igb: %s The " + "network adapter link " + "speed was downshifted " + "because it " + "overheated.\n", + netdev->name); + } + } + } /* adjust timeout factor according to speed/duplex */ adapter->tx_timeout_factor = 1; switch (adapter->link_speed) { @@ -3505,6 +3607,7 @@ static void igb_watchdog_task(struct work_struct *work) netif_carrier_on(netdev); igb_ping_all_vfs(adapter); + igb_check_vf_rate_limit(adapter); /* link state has changed, schedule phy info update */ if (!test_bit(__IGB_DOWN, &adapter->state)) @@ -3515,6 +3618,22 @@ static void igb_watchdog_task(struct work_struct *work) if (netif_carrier_ok(netdev)) { adapter->link_speed = 0; adapter->link_duplex = 0; + /* check for thermal sensor event on i350 + * copper only*/ + if (hw->mac.type == e1000_i350) { + thstat = rd32(E1000_THSTAT); + ctrl_ext = rd32(E1000_CTRL_EXT); + if ((hw->phy.media_type == + e1000_media_type_copper) && !(ctrl_ext & + E1000_CTRL_EXT_LINK_MODE_SGMII)) { + if (thstat & E1000_THSTAT_PWR_DOWN) { + printk(KERN_ERR "igb: %s The " + "network adapter was stopped " + "because it overheated.\n", + netdev->name); + } + } + } /* Links status message must follow this format */ printk(KERN_INFO "igb: %s NIC Link is Down\n", netdev->name); @@ -4547,6 +4666,15 @@ void igb_update_stats(struct igb_adapter *adapter, adapter->stats.mgptc += rd32(E1000_MGTPTC); adapter->stats.mgprc += rd32(E1000_MGTPRC); adapter->stats.mgpdc += rd32(E1000_MGTPDC); + + /* OS2BMC Stats */ + reg = rd32(E1000_MANC); + if (reg & E1000_MANC_EN_BMC2OS) { + adapter->stats.o2bgptc += rd32(E1000_O2BGPTC); + adapter->stats.o2bspc += rd32(E1000_O2BSPC); + adapter->stats.b2ospc += rd32(E1000_B2OSPC); + adapter->stats.b2ogprc += rd32(E1000_B2OGPRC); + } } static irqreturn_t igb_msix_other(int irq, void *data) @@ -6593,9 +6721,91 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return igb_set_vf_mac(adapter, vf, mac); } +static int igb_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case SPEED_100: + return 100; + case SPEED_1000: + return 1000; + default: + return 0; + } +} + +static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; + + bcnrc_val = E1000_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) & + E1000_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); + } else { + 
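+		/* A tx_rate of 0 requests no limit: bcnrc_val stays 0, leaving
+		 * the E1000_RTTBCNRC_RS_ENA bit clear for this queue. */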
bcnrc_val = 0; + } + + wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ + wr32(E1000_RTTBCNRC, bcnrc_val); +} + +static void igb_check_vf_rate_limit(struct igb_adapter *adapter) +{ + int actual_link_speed, i; + bool reset_rate = false; + + /* VF TX rate limit was not set or not supported */ + if ((adapter->vf_rate_link_speed == 0) || + (adapter->hw.mac.type != e1000_82576)) + return; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit " + "rate is disabled\n"); + } + + for (i = 0; i < adapter->vfs_allocated_count; i++) { + if (reset_rate) + adapter->vf_data[i].tx_rate = 0; + + igb_set_vf_rate_limit(&adapter->hw, i, + adapter->vf_data[i].tx_rate, + actual_link_speed); + } +} + static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { - return -EOPNOTSUPP; + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int actual_link_speed; + + if (hw->mac.type != e1000_82576) + return -EOPNOTSUPP; + + actual_link_speed = igb_link_mbps(adapter->link_speed); + if ((vf >= adapter->vfs_allocated_count) || + (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) || + (tx_rate < 0) || (tx_rate > actual_link_speed)) + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vf_data[vf].tx_rate = (u16)tx_rate; + igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + + return 0; } static int igb_ndo_get_vf_config(struct net_device *netdev, @@ -6606,7 +6816,7 @@ static int igb_ndo_get_vf_config(struct net_device *netdev, return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN); - ivi->tx_rate = 0; + ivi->tx_rate = adapter->vf_data[vf].tx_rate; ivi->vlan = adapter->vf_data[vf].pf_vlan; ivi->qos = adapter->vf_data[vf].pf_qos; return 0; diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c index ed6e3d910247..1d943aa7c7a6 100644 --- a/drivers/net/igbvf/ethtool.c +++ b/drivers/net/igbvf/ethtool.c @@ -201,13 +201,11 @@ static void igbvf_get_regs(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; u32 *regs_buff = p; - u8 revision_id; memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); - pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); - - regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device; + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; regs_buff[0] = er32(CTRL); regs_buff[1] = er32(STATUS); diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h index 990c329e6c3b..d5dad5d607d6 100644 --- a/drivers/net/igbvf/igbvf.h +++ b/drivers/net/igbvf/igbvf.h @@ -201,9 +201,6 @@ struct igbvf_adapter { unsigned int restart_queue; u32 txd_cmd; - bool detect_tx_hung; - u8 tx_timeout_factor; - u32 tx_int_delay; u32 tx_abs_int_delay; diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 6352c8158e6d..6ccc32fd7338 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -396,35 +396,6 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter, buffer_info->time_stamp = 0; } -static void igbvf_print_tx_hang(struct igbvf_adapter *adapter) -{ - struct igbvf_ring *tx_ring = adapter->tx_ring; - unsigned int i = tx_ring->next_to_clean; - unsigned int eop = tx_ring->buffer_info[i].next_to_watch; - union e1000_adv_tx_desc 
*eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); - - /* detected Tx unit hang */ - dev_err(&adapter->pdev->dev, - "Detected Tx Unit Hang:\n" - " TDH <%x>\n" - " TDT <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "buffer_info[next_to_clean]:\n" - " time_stamp <%lx>\n" - " next_to_watch <%x>\n" - " jiffies <%lx>\n" - " next_to_watch.status <%x>\n", - readl(adapter->hw.hw_addr + tx_ring->head), - readl(adapter->hw.hw_addr + tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->wb.status); -} - /** * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) * @adapter: board private structure @@ -771,7 +742,6 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) { struct igbvf_adapter *adapter = tx_ring->adapter; - struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct igbvf_buffer *buffer_info; struct sk_buff *skb; @@ -832,22 +802,6 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) } } - if (adapter->detect_tx_hung) { - /* Detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of i */ - adapter->detect_tx_hung = false; - if (tx_ring->buffer_info[i].time_stamp && - time_after(jiffies, tx_ring->buffer_info[i].time_stamp + - (adapter->tx_timeout_factor * HZ)) && - !(er32(STATUS) & E1000_STATUS_TXOFF)) { - - tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); - /* detected Tx unit hang */ - igbvf_print_tx_hang(adapter); - - netif_stop_queue(netdev); - } - } adapter->net_stats.tx_bytes += total_bytes; adapter->net_stats.tx_packets += total_packets; return count < tx_ring->count; @@ -1863,17 +1817,6 @@ static void igbvf_watchdog_task(struct work_struct *work) &adapter->link_duplex); igbvf_print_link_info(adapter); - /* adjust timeout factor according to speed/duplex */ - adapter->tx_timeout_factor = 1; - switch (adapter->link_speed) { - case SPEED_10: - adapter->tx_timeout_factor = 16; - break; - case SPEED_100: - /* maybe add some timeout factor ? 
*/ - break; - } - netif_carrier_on(netdev); netif_wake_queue(netdev); } @@ -1907,9 +1850,6 @@ static void igbvf_watchdog_task(struct work_struct *work) /* Cause software interrupt to ensure Rx ring is cleaned */ ew32(EICS, adapter->rx_ring->eims_value); - /* Force detection of hung controller every watchdog period */ - adapter->detect_tx_hung = 1; - /* Reset the timer */ if (!test_bit(__IGBVF_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, @@ -2699,8 +2639,7 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, hw->device_id = pdev->device; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; - - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->revision_id = pdev->revision; err = -EIO; adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index aa93655c3aa7..a5b0f0e194bb 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c @@ -2025,7 +2025,6 @@ static void ipg_init_mii(struct net_device *dev) if (phyaddr != 0x1f) { u16 mii_phyctrl, mii_1000cr; - u8 revisionid = 0; mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | @@ -2035,8 +2034,7 @@ static void ipg_init_mii(struct net_device *dev) mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); /* Set default phyparam */ - pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); - ipg_set_phy_default_param(revisionid, dev, phyaddr); + ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr); /* Reset PHY */ mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h index 521c0c732998..8f3df044e81e 100644 --- a/drivers/net/ixgb/ixgb.h +++ b/drivers/net/ixgb/ixgb.h @@ -149,7 +149,7 @@ struct ixgb_desc_ring { struct ixgb_adapter { struct timer_list watchdog_timer; - struct vlan_group *vlgrp; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u32 bd_number; u32 rx_buffer_len; u32 part_num; diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index 43994c199991..cc53aa1541ba 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -706,6 +706,43 @@ ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } +static int ixgb_set_flags(struct net_device *netdev, u32 data) +{ + struct ixgb_adapter *adapter = netdev_priv(netdev); + bool need_reset; + int rc; + + /* + * Tx VLAN insertion does not work per HW design when Rx stripping is + * disabled. Disable txvlan when rxvlan is turned off, and enable + * rxvlan when txvlan is turned on. 
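+ * Only a change to Rx stripping requires re-initialising the adapter, so
+ * need_reset is derived from the RXVLAN bit alone.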
+ */ + if (!(data & ETH_FLAG_RXVLAN) && + (netdev->features & NETIF_F_HW_VLAN_TX)) + data &= ~ETH_FLAG_TXVLAN; + else if (data & ETH_FLAG_TXVLAN) + data |= ETH_FLAG_RXVLAN; + + need_reset = (data & ETH_FLAG_RXVLAN) != + (netdev->features & NETIF_F_HW_VLAN_RX); + + rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN | + ETH_FLAG_TXVLAN); + if (rc) + return rc; + + if (need_reset) { + if (netif_running(netdev)) { + ixgb_down(adapter, true); + ixgb_up(adapter); + ixgb_set_speed_duplex(netdev); + } else + ixgb_reset(adapter); + } + + return 0; +} + static const struct ethtool_ops ixgb_ethtool_ops = { .get_settings = ixgb_get_settings, .set_settings = ixgb_set_settings, @@ -732,6 +769,8 @@ static const struct ethtool_ops ixgb_ethtool_ops = { .phys_id = ixgb_phys_id, .get_sset_count = ixgb_get_sset_count, .get_ethtool_stats = ixgb_get_ethtool_stats, + .get_flags = ethtool_op_get_flags, + .set_flags = ixgb_set_flags, }; void ixgb_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 5639cccb4935..0f681ac2da8d 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -100,8 +100,6 @@ static void ixgb_tx_timeout_task(struct work_struct *work); static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter); static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter); -static void ixgb_vlan_rx_register(struct net_device *netdev, - struct vlan_group *grp); static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); static void ixgb_restore_vlan(struct ixgb_adapter *adapter); @@ -336,7 +334,6 @@ static const struct net_device_ops ixgb_netdev_ops = { .ndo_set_mac_address = ixgb_set_mac, .ndo_change_mtu = ixgb_change_mtu, .ndo_tx_timeout = ixgb_tx_timeout, - .ndo_vlan_rx_register = ixgb_vlan_rx_register, .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1508,7 +1505,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) DESC_NEEDED))) return NETDEV_TX_BUSY; - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + if (vlan_tx_tag_present(skb)) { tx_flags |= IXGB_TX_FLAGS_VLAN; vlan_id = vlan_tx_tag_get(skb); } @@ -2049,12 +2046,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) ixgb_rx_checksum(adapter, rx_desc, skb); skb->protocol = eth_type_trans(skb, netdev); - if (adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) { - vlan_hwaccel_receive_skb(skb, adapter->vlgrp, - le16_to_cpu(rx_desc->special)); - } else { - netif_receive_skb(skb); - } + if (status & IXGB_RX_DESC_STATUS_VP) + __vlan_hwaccel_put_tag(skb, + le16_to_cpu(rx_desc->special)); + + netif_receive_skb(skb); rxdesc_done: /* clean up descriptor, might be written over by hw */ @@ -2152,20 +2148,6 @@ map_skb: } } -/** - * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping. 
- * - * @param netdev network interface device structure - * @param grp indicates to enable or disable tagging/stripping - **/ -static void -ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) -{ - struct ixgb_adapter *adapter = netdev_priv(netdev); - - adapter->vlgrp = grp; -} - static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter) { @@ -2200,6 +2182,7 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid) vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); vfta |= (1 << (vid & 0x1F)); ixgb_write_vfta(&adapter->hw, index, vfta); + set_bit(vid, adapter->active_vlans); } static void @@ -2208,35 +2191,22 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) struct ixgb_adapter *adapter = netdev_priv(netdev); u32 vfta, index; - ixgb_irq_disable(adapter); - - vlan_group_set_device(adapter->vlgrp, vid, NULL); - - /* don't enable interrupts unless we are UP */ - if (adapter->netdev->flags & IFF_UP) - ixgb_irq_enable(adapter); - /* remove VID from filter table */ index = (vid >> 5) & 0x7F; vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); vfta &= ~(1 << (vid & 0x1F)); ixgb_write_vfta(&adapter->hw, index, vfta); + clear_bit(vid, adapter->active_vlans); } static void ixgb_restore_vlan(struct ixgb_adapter *adapter) { - ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); - - if (adapter->vlgrp) { - u16 vid; - for (vid = 0; vid < VLAN_N_VID; vid++) { - if (!vlan_group_get_device(adapter->vlgrp, vid)) - continue; - ixgb_vlan_rx_add_vid(adapter->netdev, vid); - } - } + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + ixgb_vlan_rx_add_vid(adapter->netdev, vid); } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 3b8c92463617..8d468028bb55 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -118,6 +118,7 @@ struct vf_data_storage { bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
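The ixgb VLAN hunks above drop the vlan_group pointer in favour of a plain bitmap (active_vlans) plus the hardware VLAN filter table (VFTA), where a 12-bit VLAN ID selects one of 128 32-bit words and a bit within that word. A stand-alone sketch of that bookkeeping, assuming 4096 VLAN IDs as in VLAN_N_VID; it models the index arithmetic and the restore-after-reset loop only, not the register writes.

#include <stdint.h>
#include <stdio.h>

#define N_VID   4096                    /* as VLAN_N_VID */
#define VFTA_SZ (N_VID / 32)            /* 128 32-bit filter words */

static uint32_t vfta[VFTA_SZ];          /* software copy of the VFTA */
static uint32_t active[N_VID / 32];     /* mirrors adapter->active_vlans */

static void vlan_add(uint16_t vid)
{
        uint32_t index = (vid >> 5) & 0x7F;     /* which VFTA word */
        uint32_t bit   = vid & 0x1F;            /* which bit in it */

        vfta[index]      |= 1u << bit;
        active[vid >> 5] |= 1u << (vid & 0x1F);
}

static void vlan_kill(uint16_t vid)
{
        uint32_t index = (vid >> 5) & 0x7F;

        vfta[index]      &= ~(1u << (vid & 0x1F));
        active[vid >> 5] &= ~(1u << (vid & 0x1F));
}

/* Equivalent of ixgb_restore_vlan(): replay every set bit after a reset. */
static void vlan_restore(void)
{
        for (uint32_t vid = 0; vid < N_VID; vid++)
                if (active[vid >> 5] & (1u << (vid & 0x1F)))
                        vlan_add((uint16_t)vid);
}

int main(void)
{
        vlan_add(100);
        vlan_add(4000);
        vlan_kill(100);
        vlan_restore();
        printf("VFTA[%d]=%#x\n", 4000 >> 5,
               (unsigned)vfta[(4000 >> 5) & 0x7F]);     /* VFTA[125]=0x1 */
        return 0;
}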
*/ u16 pf_qos; + u16 tx_rate; }; /* wrapper around a pointer to a socket buffer, @@ -209,6 +210,7 @@ struct ixgbe_ring { * associated with this ring, which is * different for DCB and RSS modes */ + u8 dcb_tc; u16 work_limit; /* max work per interrupt */ @@ -243,7 +245,7 @@ enum ixgbe_ring_f_enum { RING_F_ARRAY_SIZE /* must be last in enum set */ }; -#define IXGBE_MAX_DCB_INDICES 8 +#define IXGBE_MAX_DCB_INDICES 64 #define IXGBE_MAX_RSS_INDICES 16 #define IXGBE_MAX_VMDQ_INDICES 64 #define IXGBE_MAX_FDIR_INDICES 64 @@ -334,9 +336,14 @@ struct ixgbe_adapter { u16 bd_number; struct work_struct reset_task; struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + + /* DCB parameters */ + struct ieee_pfc *ixgbe_ieee_pfc; + struct ieee_ets *ixgbe_ieee_ets; struct ixgbe_dcb_config dcb_cfg; struct ixgbe_dcb_config temp_dcb_cfg; u8 dcb_set_bitmap; + u8 dcbx_cap; enum ixgbe_fc_mode last_lfc_mode; /* Interrupt Throttle Rate */ @@ -462,6 +469,7 @@ struct ixgbe_adapter { DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); unsigned int num_vfs; struct vf_data_storage *vfinfo; + int vf_rate_link_speed; }; enum ixbge_state_t { @@ -521,7 +529,6 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); extern void ixgbe_write_eitr(struct ixgbe_q_vector *); extern int ethtool_ioctl(struct ifreq *ifr); -extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index); extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); @@ -538,6 +545,7 @@ extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring); extern void ixgbe_set_rx_mode(struct net_device *netdev); +extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); #ifdef IXGBE_FCOE extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fso(struct ixgbe_adapter *adapter, @@ -549,6 +557,8 @@ extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, struct sk_buff *skb); extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc); +extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc); extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); extern int ixgbe_fcoe_enable(struct net_device *netdev); extern int ixgbe_fcoe_disable(struct net_device *netdev); diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index d0f1d9d2c416..845c679c8b87 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -158,6 +158,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) switch (hw->phy.type) { case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = &ixgbe_get_phy_firmware_version_tnx; @@ -280,10 +281,22 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) { enum ixgbe_media_type media_type; + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + case ixgbe_phy_aq: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + /* Media type for I82598 is based on device ID */ switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82598_BX: + /* Default device ID is mezzanine card KX/KX4 */ media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_82598AF_DUAL_PORT: @@ -306,7 +319,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) media_type = ixgbe_media_type_unknown; break; } - +out: return media_type; } @@ -354,7 +367,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) /* Negotiate the fc mode to use */ ret_val = ixgbe_fc_autoneg(hw); - if (ret_val) + if (ret_val == IXGBE_ERR_FLOW_CONTROL) goto out; /* Disable any previous flow control settings */ @@ -372,10 +385,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. - * other: Invalid. #ifdef CONFIG_DCB * 4: Priority Flow Control is enabled. #endif + * other: Invalid. */ switch (hw->fc.current_mode) { case ixgbe_fc_none: @@ -432,9 +445,10 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) reg = (rx_pba_size - hw->fc.low_water) << 6; if (hw->fc.send_xon) reg |= IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); - reg = (rx_pba_size - hw->fc.high_water) << 10; + reg = (rx_pba_size - hw->fc.high_water) << 6; reg |= IXGBE_FCRTH_FCEN; IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); @@ -627,13 +641,12 @@ out: return 0; } - /** * ixgbe_setup_mac_link_82598 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg: true if auto-negotiation enabled - * @autoneg_wait_to_complete: true if waiting is needed to complete + * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. 
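One behavioural change buried in the 82598 hunk below: ixgbe_get_media_type_82598() now checks the detected PHY type first (tn, aq and cu_unknown all imply an external copper PHY) and only falls back to the device-ID table when no copper PHY was found. A compact model of that two-stage lookup; the enums and device IDs here are illustrative placeholders, not the kernel's tables.

#include <stdio.h>

enum phy_type   { PHY_UNKNOWN, PHY_TN, PHY_AQ, PHY_CU_UNKNOWN, PHY_SFP };
enum media_type { MEDIA_UNKNOWN, MEDIA_COPPER, MEDIA_FIBER, MEDIA_BACKPLANE };

static enum media_type get_media_type(enum phy_type phy, unsigned int device_id)
{
        /* Stage 1: a detected copper PHY decides the answer outright. */
        switch (phy) {
        case PHY_TN:
        case PHY_AQ:
        case PHY_CU_UNKNOWN:
                return MEDIA_COPPER;
        default:
                break;
        }

        /* Stage 2: otherwise fall back to the device ID (abridged, made-up table). */
        switch (device_id) {
        case 0x1001:            /* hypothetical KX/KX4 mezzanine ID */
                return MEDIA_BACKPLANE;
        case 0x1002:            /* hypothetical fiber ID */
                return MEDIA_FIBER;
        default:
                return MEDIA_UNKNOWN;
        }
}

int main(void)
{
        printf("%d %d\n", get_media_type(PHY_TN, 0x1002),
               get_media_type(PHY_SFP, 0x1002));        /* copper, fiber */
        return 0;
}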
**/ @@ -672,7 +685,8 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, * ixgbe_hw This will write the AUTOC register based on the new * stored values */ - status = ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + status = ixgbe_start_mac_link_82598(hw, + autoneg_wait_to_complete); } return status; @@ -698,7 +712,6 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, autoneg_wait_to_complete); - /* Set up MAC */ ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); @@ -770,7 +783,6 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) else if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) goto no_phy_reset; - hw->phy.ops.reset(hw); } @@ -779,12 +791,9 @@ no_phy_reset: * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests before reset */ - status = ixgbe_disable_pcie_master(hw); - if (status != 0) { - status = IXGBE_ERR_MASTER_REQUESTS_PENDING; - hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); - } + ixgbe_disable_pcie_master(hw); +mac_reset_top: /* * Issue global reset to the MAC. This needs to be a SW reset. * If link reset is used, it might reset the MAC when mng is using it @@ -805,6 +814,19 @@ no_phy_reset: hw_dbg(hw, "Reset polling failed to complete.\n"); } + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. We use 1usec since that is + * what is needed for ixgbe_disable_pcie_master(). The second reset + * then clears out any effects of those events. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + udelay(1); + goto mac_reset_top; + } + msleep(50); gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); @@ -824,15 +846,15 @@ no_phy_reset: IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); } + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table */ hw->mac.ops.init_rx_addrs(hw); - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - reset_hw_out: if (phy_status) status = phy_status; @@ -849,6 +871,13 @@ reset_hw_out: static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); rar_high &= ~IXGBE_RAH_VIND_MASK; @@ -868,14 +897,17 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; - if (rar < rar_entries) { - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); - if (rar_high & IXGBE_RAH_VIND_MASK) { - rar_high &= ~IXGBE_RAH_VIND_MASK; - IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); - } - } else { + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + if (rar_high & IXGBE_RAH_VIND_MASK) { + rar_high &= ~IXGBE_RAH_VIND_MASK; + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); } return 0; @@ 
-994,13 +1026,12 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) } /** - * ixgbe_read_i2c_eeprom_82598 - Read 8 bit EEPROM word of an SFP+ module - * over I2C interface through an intermediate phy. + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. * @hw: pointer to hardware structure * @byte_offset: EEPROM byte offset to read * @eeprom_data: value read * - * Performs byte read operation to SFP module's EEPROM over I2C interface. + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. **/ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) @@ -1074,10 +1105,12 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) /* Copper PHY must be checked before AUTOC LMS to determine correct * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ - if (hw->phy.type == ixgbe_phy_tn || - hw->phy.type == ixgbe_phy_cu_unknown) { - hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, - &ext_ability); + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, + MDIO_MMD_PMAPMD, &ext_ability); if (ext_ability & MDIO_PMA_EXTABLE_10GBT) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; if (ext_ability & MDIO_PMA_EXTABLE_1000BT) @@ -1085,6 +1118,8 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) if (ext_ability & MDIO_PMA_EXTABLE_100BTX) physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; goto out; + default: + break; } switch (autoc & IXGBE_AUTOC_LMS_MASK) { @@ -1179,13 +1214,14 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .set_vmdq = &ixgbe_set_vmdq_82598, .clear_vmdq = &ixgbe_clear_vmdq_82598, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, .enable_mc = &ixgbe_enable_mc_generic, .disable_mc = &ixgbe_disable_mc_generic, .clear_vfta = &ixgbe_clear_vfta_82598, .set_vfta = &ixgbe_set_vfta_82598, .fc_enable = &ixgbe_fc_enable_82598, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, }; static struct ixgbe_eeprom_operations eeprom_ops_82598 = { diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index a21f5817685b..00aeba385a2f 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -112,7 +112,8 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) goto setup_sfp_out; /* PHY config will finish before releasing the semaphore */ - ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); if (ret_val != 0) { ret_val = IXGBE_ERR_SWFW_SYNC; goto setup_sfp_out; @@ -329,11 +330,14 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) enum ixgbe_media_type media_type; /* Detect if there is a copper PHY attached. 
*/ - if (hw->phy.type == ixgbe_phy_cu_unknown || - hw->phy.type == ixgbe_phy_tn || - hw->phy.type == ixgbe_phy_aq) { + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + case ixgbe_phy_aq: media_type = ixgbe_media_type_copper; goto out; + default: + break; } switch (hw->device_id) { @@ -354,6 +358,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_CX4: media_type = ixgbe_media_type_cx4; break; + case IXGBE_DEV_ID_82599_T3_LOM: + media_type = ixgbe_media_type_copper; + break; default: media_type = ixgbe_media_type_unknown; break; @@ -411,14 +418,14 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, return status; } - /** - * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser - * @hw: pointer to hardware structure - * - * The base drivers may require better control over SFP+ module - * PHY states. This includes selectively shutting down the Tx - * laser on the PHY, effectively halting physical link. - **/ +/** + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); @@ -463,8 +470,6 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) **/ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { - hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); - if (hw->mac.autotry_restart) { ixgbe_disable_tx_laser_multispeed_fiber(hw); ixgbe_enable_tx_laser_multispeed_fiber(hw); @@ -487,17 +492,21 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { s32 status = 0; - ixgbe_link_speed phy_link_speed; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; u32 speedcnt = 0; u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + u32 i = 0; bool link_up = false; bool negotiation; - int i; /* Mask off requested but non-supported speeds */ - hw->mac.ops.get_link_capabilities(hw, &phy_link_speed, &negotiation); - speed &= phy_link_speed; + status = hw->mac.ops.get_link_capabilities(hw, &link_speed, + &negotiation); + if (status != 0) + return status; + + speed &= link_speed; /* * Try each speed one by one, highest priority first. 
We do this in @@ -508,9 +517,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; /* If we already have link at this speed, just jump out */ - hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; - if ((phy_link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) goto out; /* Set the module link speed */ @@ -522,9 +534,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, msleep(40); status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_10GB_FULL, - autoneg, - autoneg_wait_to_complete); + IXGBE_LINK_SPEED_10GB_FULL, + autoneg, + autoneg_wait_to_complete); if (status != 0) return status; @@ -536,14 +548,16 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, * Section 73.10.2, we may have to wait up to 500ms if KR is * attempted. 82599 uses the same timing for 10g SFI. */ - for (i = 0; i < 5; i++) { /* Wait for the link partner to also set speed */ msleep(100); /* If we have link, just jump out */ - hw->mac.ops.check_link(hw, &phy_link_speed, - &link_up, false); + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status != 0) + return status; + if (link_up) goto out; } @@ -555,9 +569,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; /* If we already have link at this speed, just jump out */ - hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; - if ((phy_link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) goto out; /* Set the module link speed */ @@ -570,9 +587,9 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, msleep(40); status = ixgbe_setup_mac_link_82599(hw, - IXGBE_LINK_SPEED_1GB_FULL, - autoneg, - autoneg_wait_to_complete); + IXGBE_LINK_SPEED_1GB_FULL, + autoneg, + autoneg_wait_to_complete); if (status != 0) return status; @@ -583,7 +600,11 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, msleep(100); /* If we have link, just jump out */ - hw->mac.ops.check_link(hw, &phy_link_speed, &link_up, false); + status = hw->mac.ops.check_link(hw, &link_speed, &link_up, + false); + if (status != 0) + return status; + if (link_up) goto out; } @@ -626,13 +647,10 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { s32 status = 0; - ixgbe_link_speed link_speed; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; s32 i, j; bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - struct ixgbe_adapter *adapter = hw->back; - - hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n"); /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; @@ -658,7 +676,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, autoneg_wait_to_complete); - if (status) + if (status != 0) goto out; /* @@ -671,8 +689,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, mdelay(100); /* If we have link, just jump out */ - hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); + status = hw->mac.ops.check_link(hw, 
&link_speed, + &link_up, false); + if (status != 0) + goto out; + if (link_up) goto out; } @@ -690,7 +711,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, hw->phy.smart_speed_active = true; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, autoneg_wait_to_complete); - if (status) + if (status != 0) goto out; /* @@ -703,8 +724,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, mdelay(100); /* If we have link, just jump out */ - hw->mac.ops.check_link(hw, &link_speed, - &link_up, false); + status = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + if (status != 0) + goto out; + if (link_up) goto out; } @@ -716,7 +740,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, out: if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) - e_info(hw, "Smartspeed has downgraded the link speed from " + hw_dbg(hw, "Smartspeed has downgraded the link speed from " "the maximum advertised\n"); return status; } @@ -748,6 +772,9 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, /* Check to see if speed passed in is supported. */ hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); + if (status != 0) + goto out; + speed &= link_capabilities; if (speed == IXGBE_LINK_SPEED_UNKNOWN) { @@ -761,7 +788,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, else orig_autoc = autoc; - if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { @@ -878,7 +904,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) /* PHY ops must be identified and initialized prior to reset */ - /* Init PHY and function pointers, perform SFP setup */ + /* Identify PHY and related function pointers */ status = hw->phy.ops.init(hw); if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) @@ -890,6 +916,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) hw->phy.sfp_setup_needed = false; } + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + /* Reset PHY */ if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) hw->phy.ops.reset(hw); @@ -898,12 +927,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests before reset */ - status = ixgbe_disable_pcie_master(hw); - if (status != 0) { - status = IXGBE_ERR_MASTER_REQUESTS_PENDING; - hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); - } + ixgbe_disable_pcie_master(hw); +mac_reset_top: /* * Issue global reset to the MAC. This needs to be a SW reset. * If link reset is used, it might reset the MAC when mng is using it @@ -924,6 +950,19 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) hw_dbg(hw, "Reset polling failed to complete.\n"); } + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. We use 1usec since that is + * what is needed for ixgbe_disable_pcie_master(). The second reset + * then clears out any effects of those events. 
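The multispeed-fiber and SmartSpeed hunks above tighten error handling (every check_link/get_link_capabilities return code is now propagated), but the overall shape is unchanged: try the highest speed first, give the link partner a few hundred milliseconds to come up (the clause 73 timing noted in the code), then drop to the next speed. A simplified standalone version of that fallback loop, with stubbed hardware calls standing in for the setup_mac_link/check_link operations.

#include <stdbool.h>
#include <stdio.h>

enum { SPEED_NONE = 0, SPEED_1G = 1, SPEED_10G = 2 };

/* Stubs standing in for the MAC/PHY operations used by the driver. */
static int  set_module_and_mac_speed(int speed) { (void)speed; return 0; }
static bool link_came_up(int speed) { return speed == SPEED_1G; /* example */ }

static int negotiate_multispeed(unsigned int advertised)
{
        static const int try_order[] = { SPEED_10G, SPEED_1G };

        for (unsigned int i = 0; i < 2; i++) {
                int speed = try_order[i];

                if (!(advertised & (unsigned int)speed))
                        continue;               /* speed not requested */

                if (set_module_and_mac_speed(speed))
                        return -1;              /* propagate HW errors, as the
                                                   patch now does for check_link */

                /* Poll for the partner; the driver uses a handful of 100 ms
                 * sleeps here, this sketch just loops. */
                for (int poll = 0; poll < 5; poll++)
                        if (link_came_up(speed))
                                return speed;
        }
        return SPEED_NONE;
}

int main(void)
{
        printf("negotiated: %d\n",
               negotiate_multispeed(SPEED_10G | SPEED_1G));     /* 1 (1G) */
        return 0;
}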
+ */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + udelay(1); + goto mac_reset_top; + } + msleep(50); /* @@ -951,6 +990,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) } } + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + /* * Store MAC address from RAR0, clear receive address registers, and * clear the multicast table. Also reset num_rar_entries to 128, @@ -959,9 +1001,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); - /* Store the permanent mac address */ - hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); - /* Store the permanent SAN mac address */ hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); @@ -1733,13 +1772,34 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * * Determines the physical layer module found on the current adapter. + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. **/ -static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + + /* Detect PHY if not unknown - returns success if already detected. */ status = ixgbe_identify_phy_generic(hw); - if (status != 0) - status = ixgbe_identify_sfp_module_generic(hw); + if (status != 0) { + /* 82599 10GBASE-T requires an external PHY */ + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) + goto out; + else + status = ixgbe_identify_sfp_module_generic(hw); + } + + /* Set PHY type none if no PHY detected */ + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.type = ixgbe_phy_none; + status = 0; + } + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == ixgbe_phy_sfp_unsupported) + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + +out: return status; } @@ -1763,11 +1823,12 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) hw->phy.ops.identify(hw); - if (hw->phy.type == ixgbe_phy_tn || - hw->phy.type == ixgbe_phy_aq || - hw->phy.type == ixgbe_phy_cu_unknown) { + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: + case ixgbe_phy_cu_unknown: hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, - &ext_ability); + &ext_ability); if (ext_ability & MDIO_PMA_EXTABLE_10GBT) physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; if (ext_ability & MDIO_PMA_EXTABLE_1000BT) @@ -1775,6 +1836,8 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) if (ext_ability & MDIO_PMA_EXTABLE_100BTX) physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; goto out; + default: + break; } switch (autoc & IXGBE_AUTOC_LMS_MASK) { @@ -1886,6 +1949,7 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) break; else + /* Use interrupt-safe sleep just in case */ udelay(10); } @@ -1995,7 +2059,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .set_vmdq = &ixgbe_set_vmdq_generic, .clear_vmdq = &ixgbe_clear_vmdq_generic, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, .enable_mc = &ixgbe_enable_mc_generic, .disable_mc = &ixgbe_disable_mc_generic, @@ -2006,31 +2069,34 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .setup_sfp = &ixgbe_setup_sfp_modules_82599, 
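Both the 82598 and 82599 reset paths in the hunks above gain the same recovery pattern: if the MAC flags a "double reset required" condition, clear the flag, stall about a microsecond so pending events drain, and run the whole reset sequence once more. Clearing the flag before looping is what bounds it to exactly two passes. Sketched below with a plain loop instead of the driver's goto and a stubbed reset primitive.

#include <stdbool.h>
#include <stdio.h>

struct mac_state {
        bool double_reset_required;     /* models IXGBE_FLAGS_DOUBLE_RESET_REQUIRED */
};

static int issue_sw_reset(struct mac_state *mac)
{
        (void)mac;
        printf("MAC reset issued\n");
        /* a first pass may leave residue that only a second reset clears */
        return 0;
}

static int reset_hw(struct mac_state *mac)
{
        for (;;) {
                int err = issue_sw_reset(mac);
                if (err)
                        return err;

                if (!mac->double_reset_required)
                        break;

                /* Clear the flag first so we loop at most once more, then
                 * stall briefly (the driver uses udelay(1)) before pass two. */
                mac->double_reset_required = false;
        }
        return 0;
}

int main(void)
{
        struct mac_state mac = { .double_reset_required = true };
        return reset_hw(&mac);          /* prints "MAC reset issued" twice */
}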
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, + .release_swfw_sync = &ixgbe_release_swfw_sync, + }; static struct ixgbe_eeprom_operations eeprom_ops_82599 = { - .init_params = &ixgbe_init_eeprom_params_generic, - .read = &ixgbe_read_eerd_generic, - .write = &ixgbe_write_eeprom_generic, - .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, - .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, - .update_checksum = &ixgbe_update_eeprom_checksum_generic, + .init_params = &ixgbe_init_eeprom_params_generic, + .read = &ixgbe_read_eerd_generic, + .write = &ixgbe_write_eeprom_generic, + .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, + .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; static struct ixgbe_phy_operations phy_ops_82599 = { - .identify = &ixgbe_identify_phy_82599, - .identify_sfp = &ixgbe_identify_sfp_module_generic, - .init = &ixgbe_init_phy_ops_82599, - .reset = &ixgbe_reset_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, - .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, - .read_i2c_byte = &ixgbe_read_i2c_byte_generic, - .write_i2c_byte = &ixgbe_write_i2c_byte_generic, - .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, - .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, - .check_overtemp = &ixgbe_tn_check_overtemp, + .identify = &ixgbe_identify_phy_82599, + .identify_sfp = &ixgbe_identify_sfp_module_generic, + .init = &ixgbe_init_phy_ops_82599, + .reset = &ixgbe_reset_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, + .setup_link = &ixgbe_setup_phy_link_generic, + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, + .read_i2c_byte = &ixgbe_read_i2c_byte_generic, + .write_i2c_byte = &ixgbe_write_i2c_byte_generic, + .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, + .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, + .check_overtemp = &ixgbe_tn_check_overtemp, }; struct ixgbe_info ixgbe_82599_info = { diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index ebbda7d15254..bcd952916eb2 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -46,10 +46,13 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_release_eeprom(struct ixgbe_hw *hw); -static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); -static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); -static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); /** @@ -139,17 +142,29 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) IXGBE_READ_REG(hw, IXGBE_MRFC); IXGBE_READ_REG(hw, IXGBE_RLEC); IXGBE_READ_REG(hw, IXGBE_LXONTXC); - IXGBE_READ_REG(hw, IXGBE_LXONRXC); IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); - IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + } for (i = 0; i < 8; i++) { IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); - IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); - IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } } - + if (hw->mac.type >= ixgbe_mac_82599EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); IXGBE_READ_REG(hw, IXGBE_PRC64); IXGBE_READ_REG(hw, IXGBE_PRC127); IXGBE_READ_REG(hw, IXGBE_PRC255); @@ -187,9 +202,26 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) IXGBE_READ_REG(hw, IXGBE_BPTC); for (i = 0; i < 16; i++) { IXGBE_READ_REG(hw, IXGBE_QPRC(i)); - IXGBE_READ_REG(hw, IXGBE_QBRC(i)); IXGBE_READ_REG(hw, IXGBE_QPTC(i)); - IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + } + + if (hw->mac.type == ixgbe_mac_X540) { + if (hw->phy.id == 0) + hw->phy.ops.identify(hw); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i); } return 0; @@ -454,8 +486,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests */ - if (ixgbe_disable_pcie_master(hw) != 0) - hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); + ixgbe_disable_pcie_master(hw); return 0; } @@ -603,7 +634,6 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) ixgbe_shift_out_eeprom_bits(hw, 
data, 16); ixgbe_standby_eeprom(hw); - msleep(hw->eeprom.semaphore_delay); /* Done with writing - release the EEPROM */ ixgbe_release_eeprom(hw); } @@ -747,10 +777,10 @@ s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) { s32 status = 0; - u32 eec = 0; + u32 eec; u32 i; - if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) status = IXGBE_ERR_SWFW_SYNC; if (status == 0) { @@ -773,18 +803,18 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); hw_dbg(hw, "Could not acquire EEPROM grant\n"); - ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); status = IXGBE_ERR_EEPROM; } - } - /* Setup EEPROM for Read/Write */ - if (status == 0) { - /* Clear CS and SK */ - eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - IXGBE_WRITE_FLUSH(hw); - udelay(1); + /* Setup EEPROM for Read/Write */ + if (status == 0) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + udelay(1); + } } return status; } @@ -798,13 +828,10 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM; - u32 timeout; + u32 timeout = 2000; u32 i; u32 swsm; - /* Set timeout value based on size of EEPROM */ - timeout = hw->eeprom.word_size + 1; - /* Get SMBI software semaphore between device drivers first */ for (i = 0; i < timeout; i++) { /* @@ -816,7 +843,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) status = 0; break; } - msleep(1); + udelay(50); } /* Now get the semaphore between SW/FW through the SWESMBI bit */ @@ -844,11 +871,14 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) * was not granted because we don't have access to the EEPROM */ if (i >= timeout) { - hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " + hw_dbg(hw, "SWESMBI Software EEPROM semaphore " "not granted.\n"); ixgbe_release_eeprom_semaphore(hw); status = IXGBE_ERR_EEPROM; } + } else { + hw_dbg(hw, "Software semaphore SMBI between device drivers " + "not granted.\n"); } return status; @@ -1080,11 +1110,14 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) eec &= ~IXGBE_EEC_REQ; IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); - ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + /* Delay before attempt to obtain semaphore again to allow FW access */ + msleep(hw->eeprom.semaphore_delay); } /** - * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure **/ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) @@ -1190,7 +1223,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) if (status == 0) { checksum = hw->eeprom.ops.calc_checksum(hw); status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, - checksum); + checksum); } else { hw_dbg(hw, "EEPROM read failed\n"); } @@ -1238,37 +1271,37 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 rar_low, rar_high; u32 rar_entries = hw->mac.num_rar_entries; + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + /* setup 
VMDq pool selection before this RAR gets enabled */ hw->mac.ops.set_vmdq(hw, index, vmdq); - /* Make sure we are using a valid rar index range */ - if (index < rar_entries) { - /* - * HW expects these in little endian so we reverse the byte - * order from network order (big endian) to little endian - */ - rar_low = ((u32)addr[0] | - ((u32)addr[1] << 8) | - ((u32)addr[2] << 16) | - ((u32)addr[3] << 24)); - /* - * Some parts put the VMDq setting in the extra RAH bits, - * so save everything except the lower 16 bits that hold part - * of the address and the address valid bit. - */ - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); - rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); - if (enable_addr != 0) - rar_high |= IXGBE_RAH_AV; + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - } else { - hw_dbg(hw, "RAR index %d is out of range.\n", index); - return IXGBE_ERR_RAR_INDEX; - } + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); return 0; } @@ -1286,58 +1319,26 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) u32 rar_entries = hw->mac.num_rar_entries; /* Make sure we are using a valid rar index range */ - if (index < rar_entries) { - /* - * Some parts put the VMDq setting in the extra RAH bits, - * so save everything except the lower 16 bits that hold part - * of the address and the address valid bit. - */ - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); - rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); - - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); - } else { + if (index >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", index); - return IXGBE_ERR_RAR_INDEX; + return IXGBE_ERR_INVALID_ARGUMENT; } - /* clear VMDq pool/queue selection for this RAR */ - hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); - - return 0; -} - -/** - * ixgbe_enable_rar - Enable Rx address register - * @hw: pointer to hardware structure - * @index: index into the RAR table - * - * Enables the select receive address register. - **/ -static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index) -{ - u32 rar_high; - + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); - rar_high |= IXGBE_RAH_AV; + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); -} -/** - * ixgbe_disable_rar - Disable Rx address register - * @hw: pointer to hardware structure - * @index: index into the RAR table - * - * Disables the select receive address register. 
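The generic RAR helpers above now validate the index up front and return IXGBE_ERR_INVALID_ARGUMENT, but the address packing itself is unchanged: the six MAC bytes are split little-endian into RAL (bytes 0-3) and the low 16 bits of RAH (bytes 4-5), with the address-valid bit kept in RAH's upper half and the VMDq bits preserved. A self-contained illustration of that bit layout, with no register I/O.

#include <stdint.h>
#include <stdio.h>

#define RAH_AV  0x80000000u     /* address-valid bit, as IXGBE_RAH_AV */

struct rar { uint32_t ral, rah; };

static struct rar pack_rar(const uint8_t addr[6], int valid, uint32_t old_rah)
{
        struct rar r;

        /* Bytes 0-3 go into RAL, lowest byte first (little endian). */
        r.ral = (uint32_t)addr[0] |
                ((uint32_t)addr[1] << 8) |
                ((uint32_t)addr[2] << 16) |
                ((uint32_t)addr[3] << 24);

        /* Keep everything above the low 16 bits except AV (VMDq bits etc.),
         * then drop in bytes 4-5 and the valid bit. */
        r.rah  = old_rah & ~(0x0000FFFFu | RAH_AV);
        r.rah |= (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
        if (valid)
                r.rah |= RAH_AV;
        return r;
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        struct rar r = pack_rar(mac, 1, 0);

        printf("RAL=%08x RAH=%08x\n", (unsigned)r.ral, (unsigned)r.rah);
        /* prints RAL=aa211b00 RAH=8000ccbb */
        return 0;
}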
- **/ -static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index) -{ - u32 rar_high; + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); - rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); - rar_high &= (~IXGBE_RAH_AV); - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + return 0; } /** @@ -1386,7 +1387,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) } /* Clear the MTA */ - hw->addr_ctrl.mc_addr_in_rar_count = 0; hw->addr_ctrl.mta_in_use = 0; IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); @@ -1401,105 +1401,6 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) } /** - * ixgbe_add_uc_addr - Adds a secondary unicast address. - * @hw: pointer to hardware structure - * @addr: new address - * - * Adds it to unused receive address register or goes into promiscuous mode. - **/ -static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) -{ - u32 rar_entries = hw->mac.num_rar_entries; - u32 rar; - - hw_dbg(hw, " UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - - /* - * Place this address in the RAR if there is room, - * else put the controller into promiscuous mode - */ - if (hw->addr_ctrl.rar_used_count < rar_entries) { - rar = hw->addr_ctrl.rar_used_count - - hw->addr_ctrl.mc_addr_in_rar_count; - hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); - hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); - hw->addr_ctrl.rar_used_count++; - } else { - hw->addr_ctrl.overflow_promisc++; - } - - hw_dbg(hw, "ixgbe_add_uc_addr Complete\n"); -} - -/** - * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses - * @hw: pointer to hardware structure - * @netdev: pointer to net device structure - * - * The given list replaces any existing list. Clears the secondary addrs from - * receive address registers. Uses unused receive address registers for the - * first secondary addresses, and falls back to promiscuous mode as needed. - * - * Drivers using secondary unicast addresses must set user_set_promisc when - * manually putting the device into promiscuous mode. 
- **/ -s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, - struct net_device *netdev) -{ - u32 i; - u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; - u32 uc_addr_in_use; - u32 fctrl; - struct netdev_hw_addr *ha; - - /* - * Clear accounting of old secondary address list, - * don't count RAR[0] - */ - uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; - hw->addr_ctrl.rar_used_count -= uc_addr_in_use; - hw->addr_ctrl.overflow_promisc = 0; - - /* Zero out the other receive addresses */ - hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1); - for (i = 0; i < uc_addr_in_use; i++) { - IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); - IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); - } - - /* Add the new addresses */ - netdev_for_each_uc_addr(ha, netdev) { - hw_dbg(hw, " Adding the secondary addresses:\n"); - ixgbe_add_uc_addr(hw, ha->addr, 0); - } - - if (hw->addr_ctrl.overflow_promisc) { - /* enable promisc if not already in overflow or set by user */ - if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { - hw_dbg(hw, " Entering address overflow promisc mode\n"); - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl |= IXGBE_FCTRL_UPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - hw->addr_ctrl.uc_set_promisc = true; - } - } else { - /* only disable if set by overflow, not by user */ - if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) && - !(hw->addr_ctrl.user_set_promisc)) { - hw_dbg(hw, " Leaving address overflow promisc mode\n"); - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl &= ~IXGBE_FCTRL_UPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - hw->addr_ctrl.uc_set_promisc = false; - } - } - - hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n"); - return 0; -} - -/** * ixgbe_mta_vector - Determines bit-vector in multicast table to set * @hw: pointer to hardware structure * @mc_addr: the multicast address @@ -1550,7 +1451,6 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) u32 vector; u32 vector_bit; u32 vector_reg; - u32 mta_reg; hw->addr_ctrl.mta_in_use++; @@ -1568,9 +1468,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) */ vector_reg = (vector >> 5) & 0x7F; vector_bit = vector & 0x1F; - mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); - IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); } /** @@ -1596,18 +1494,21 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); hw->addr_ctrl.mta_in_use = 0; - /* Clear the MTA */ + /* Clear mta_shadow */ hw_dbg(hw, " Clearing MTA\n"); - for (i = 0; i < hw->mac.mcft_size; i++) - IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); - /* Add the new addresses */ + /* Update mta shadow */ netdev_for_each_mc_addr(ha, netdev) { hw_dbg(hw, " Adding the multicast addresses:\n"); ixgbe_set_mta(hw, ha->addr); } /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, + hw->mac.mta_shadow[i]); + if (hw->addr_ctrl.mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); @@ -1624,15 +1525,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, **/ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) { - u32 i; - u32 rar_entries = hw->mac.num_rar_entries; struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; - if (a->mc_addr_in_rar_count > 0) - for (i = (rar_entries - a->mc_addr_in_rar_count); - i < rar_entries; i++) - 
ixgbe_enable_rar(hw, i); - if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); @@ -1648,15 +1542,8 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) { - u32 i; - u32 rar_entries = hw->mac.num_rar_entries; struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; - if (a->mc_addr_in_rar_count > 0) - for (i = (rar_entries - a->mc_addr_in_rar_count); - i < rar_entries; i++) - ixgbe_disable_rar(hw, i); - if (a->mta_in_use > 0) IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); @@ -1685,7 +1572,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) #endif /* CONFIG_DCB */ /* Negotiate the fc mode to use */ ret_val = ixgbe_fc_autoneg(hw); - if (ret_val) + if (ret_val == IXGBE_ERR_FLOW_CONTROL) goto out; /* Disable any previous flow control settings */ @@ -1703,7 +1590,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) * 2: Tx flow control is enabled (we can send pause frames but * we do not support receiving pause frames). * 3: Both Rx and Tx flow control (symmetric) are enabled. +#ifdef CONFIG_DCB * 4: Priority Flow Control is enabled. +#endif * other: Invalid. */ switch (hw->fc.current_mode) { @@ -1791,12 +1680,13 @@ out: **/ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) { - s32 ret_val = 0; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; ixgbe_link_speed speed; - u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; - u32 links2, anlp1_reg, autoc_reg, links; bool link_up; + if (hw->fc.disable_fc_autoneg) + goto out; + /* * AN should have completed when the cable was plugged in. * Look for reasons to bail out. Bail out if: @@ -1807,153 +1697,199 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) * So use link_up_wait_to_complete=false. 
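The multicast-filter rework in the hunks above replaces per-address read-modify-write of the hardware MTA with a software shadow: each multicast address hashes to a 12-bit vector, the vector selects a word (bits 11:5) and a bit (bits 4:0) in a 128-word table, and the whole shadow table is written out once after the list is rebuilt. A small model of that shadow bookkeeping, taking the 12-bit vector as given (the hash itself depends on mc_filter_type and is not reproduced here).

#include <stdint.h>
#include <stdio.h>

#define MCFT_SIZE 128                    /* words in the multicast table */

static uint32_t mta_shadow[MCFT_SIZE];   /* software copy, as mac.mta_shadow */
static uint32_t mta_regs[MCFT_SIZE];     /* stands in for the MTA registers */

static void set_mta(uint16_t vector)
{
        uint32_t reg, bit;

        vector &= 0xFFF;                 /* 12-bit vector */
        reg = (vector >> 5) & 0x7F;      /* which word */
        bit = vector & 0x1F;             /* which bit */

        mta_shadow[reg] |= 1u << bit;
}

/* One bulk write at the end instead of a read-modify-write per address. */
static void flush_mta(void)
{
        for (int i = 0; i < MCFT_SIZE; i++)
                mta_regs[i] = mta_shadow[i];
}

int main(void)
{
        set_mta(0x123);                  /* word 9, bit 3 */
        set_mta(0xFFF);                  /* word 127, bit 31 */
        flush_mta();
        printf("MTA[9]=%#x MTA[127]=%#x\n",
               (unsigned)mta_regs[9], (unsigned)mta_regs[127]);
        return 0;
}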
*/ hw->mac.ops.check_link(hw, &speed, &link_up, false); - - if (hw->fc.disable_fc_autoneg || (!link_up)) { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; + if (!link_up) { + ret_val = IXGBE_ERR_FLOW_CONTROL; goto out; } - /* - * On backplane, bail out if - * - backplane autoneg was not completed, or if - * - we are 82599 and link partner is not AN enabled - */ - if (hw->phy.media_type == ixgbe_media_type_backplane) { - links = IXGBE_READ_REG(hw, IXGBE_LINKS); - if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - goto out; - } + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber: + if (speed == IXGBE_LINK_SPEED_1GB_FULL) + ret_val = ixgbe_fc_autoneg_fiber(hw); + break; - if (hw->mac.type == ixgbe_mac_82599EB) { - links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); - if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - goto out; - } - } + /* Autoneg flow control on backplane adapters */ + case ixgbe_media_type_backplane: + ret_val = ixgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case ixgbe_media_type_copper: + if (ixgbe_device_supports_autoneg_fc(hw) == 0) + ret_val = ixgbe_fc_autoneg_copper(hw); + break; + + default: + break; } +out: + if (ret_val == 0) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } + return ret_val; +} + +/** + * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + * @hw: pointer to hardware structure + * + * Enable flow control according on 1 gig fiber. + **/ +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +{ + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + s32 ret_val; + /* * On multispeed fiber at 1g, bail out if * - link is up but AN did not complete, or if * - link is up and AN completed but timed out */ - if (hw->phy.multispeed_fiber && (speed == IXGBE_LINK_SPEED_1GB_FULL)) { - linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); - if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || - ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { - hw->fc.fc_was_autonegged = false; - hw->fc.current_mode = hw->fc.requested_mode; - goto out; - } + + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || + ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; } + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + + ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE, + IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. 
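The refactored ixgbe_fc_autoneg() above turns one long function into a dispatcher: pick the negotiation helper by media type (fiber only at 1G, backplane via its link registers, copper only when the device supports autonegotiated flow control), and on any failure fall back to the mode the user requested. The control flow, condensed into a standalone sketch with stubbed helpers.

#include <stdio.h>

enum media   { MEDIA_FIBER, MEDIA_BACKPLANE, MEDIA_COPPER, MEDIA_OTHER };
enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Stubs for the per-media helpers introduced by the patch. */
static int fc_autoneg_fiber(enum fc_mode *out)     { *out = FC_FULL;     return 0; }
static int fc_autoneg_backplane(enum fc_mode *out) { *out = FC_RX_PAUSE; return 0; }
static int fc_autoneg_copper(enum fc_mode *out)    { *out = FC_FULL;     return 0; }

static enum fc_mode fc_autoneg(enum media media, int link_up, int speed_1g,
                               enum fc_mode requested)
{
        enum fc_mode mode = requested;
        int ret = -1;                    /* models IXGBE_ERR_FC_NOT_NEGOTIATED */

        if (!link_up)
                return requested;        /* nothing negotiated: keep request */

        switch (media) {
        case MEDIA_FIBER:
                if (speed_1g)            /* 10G fiber keeps the requested mode */
                        ret = fc_autoneg_fiber(&mode);
                break;
        case MEDIA_BACKPLANE:
                ret = fc_autoneg_backplane(&mode);
                break;
        case MEDIA_COPPER:
                /* the driver additionally gates this on
                 * ixgbe_device_supports_autoneg_fc() */
                ret = fc_autoneg_copper(&mode);
                break;
        default:
                break;
        }

        return ret == 0 ? mode : requested;
}

int main(void)
{
        printf("%d\n", fc_autoneg(MEDIA_BACKPLANE, 1, 0, FC_NONE));  /* 1 */
        return 0;
}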
+ **/ +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +{ + u32 links2, anlp1_reg, autoc_reg, links; + s32 ret_val; + /* - * Bail out on - * - copper or CX4 adapters - * - fiber adapters running at 10gig + * On backplane, bail out if + * - backplane autoneg was not completed, or if + * - we are 82599 and link partner is not AN enabled */ - if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->phy.media_type == ixgbe_media_type_cx4) || - ((hw->phy.media_type == ixgbe_media_type_fiber) && - (speed == IXGBE_LINK_SPEED_10GB_FULL))) { + links = IXGBE_READ_REG(hw, IXGBE_LINKS); + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { hw->fc.fc_was_autonegged = false; hw->fc.current_mode = hw->fc.requested_mode; + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; goto out; } + if (hw->mac.type == ixgbe_mac_82599EB) { + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + } /* - * Read the AN advertisement and LP ability registers and resolve + * Read the 10g AN autoc and LP ability registers and resolve * local flow control settings accordingly */ - if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && - (hw->phy.media_type != ixgbe_media_type_backplane)) { - pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); - pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); - if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE)) { - /* - * Now we need to check if the user selected Rx ONLY - * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise RX - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. - */ - if (hw->fc.requested_mode == ixgbe_fc_full) { - hw->fc.current_mode = ixgbe_fc_full; - hw_dbg(hw, "Flow Control = FULL.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control=RX PAUSE only\n"); - } - } else if (!(pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { - hw->fc.current_mode = ixgbe_fc_tx_pause; - hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); - } else if ((pcs_anadv_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_anadv_reg & IXGBE_PCS1GANA_ASM_PAUSE) && - !(pcs_lpab_reg & IXGBE_PCS1GANA_SYM_PAUSE) && - (pcs_lpab_reg & IXGBE_PCS1GANA_ASM_PAUSE)) { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_none; - hw_dbg(hw, "Flow Control = NONE.\n"); - } - } + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); - if (hw->phy.media_type == ixgbe_media_type_backplane) { + ret_val = ixgbe_negotiate_fc(hw, autoc_reg, + anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, + IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. 
+ **/ +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +{ + u16 technology_ability_reg = 0; + u16 lp_technology_ability_reg = 0; + + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &technology_ability_reg); + hw->phy.ops.read_reg(hw, MDIO_AN_LPA, + MDIO_MMD_AN, + &lp_technology_ability_reg); + + return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); +} + +/** + * ixgbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) + return IXGBE_ERR_FC_NOT_NEGOTIATED; + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { /* - * Read the 10g AN autoc and LP ability registers and resolve - * local flow control settings accordingly + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. */ - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); - - if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) && - (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE)) { - /* - * Now we need to check if the user selected Rx ONLY - * of pause frames. In this case, we had to advertise - * FULL flow control because we could not advertise RX - * ONLY. Hence, we must now check to see if we need to - * turn OFF the TRANSMISSION of PAUSE frames. 
- */ - if (hw->fc.requested_mode == ixgbe_fc_full) { - hw->fc.current_mode = ixgbe_fc_full; - hw_dbg(hw, "Flow Control = FULL.\n"); - } else { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control=RX PAUSE only\n"); - } - } else if (!(autoc_reg & IXGBE_AUTOC_SYM_PAUSE) && - (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) && - (anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) && - (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) { - hw->fc.current_mode = ixgbe_fc_tx_pause; - hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); - } else if ((autoc_reg & IXGBE_AUTOC_SYM_PAUSE) && - (autoc_reg & IXGBE_AUTOC_ASM_PAUSE) && - !(anlp1_reg & IXGBE_ANLP1_SYM_PAUSE) && - (anlp1_reg & IXGBE_ANLP1_ASM_PAUSE)) { - hw->fc.current_mode = ixgbe_fc_rx_pause; - hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; + hw_dbg(hw, "Flow Control = FULL.\n"); } else { - hw->fc.current_mode = ixgbe_fc_none; - hw_dbg(hw, "Flow Control = NONE.\n"); + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; + hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_rx_pause; + hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_none; + hw_dbg(hw, "Flow Control = NONE.\n"); } - /* Record that current_mode is the result of a successful autoneg */ - hw->fc.fc_was_autonegged = true; - -out: - return ret_val; + return 0; } /** @@ -1965,7 +1901,8 @@ out: static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) { s32 ret_val = 0; - u32 reg; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; #ifdef CONFIG_DCB if (hw->fc.requested_mode == ixgbe_fc_pfc) { @@ -1973,7 +1910,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) goto out; } -#endif +#endif /* CONFIG_DCB */ /* Validate the packetbuf configuration */ if (packetbuf_num < 0 || packetbuf_num > 7) { hw_dbg(hw, "Invalid packet buffer number [%d], expected range " @@ -2011,11 +1948,26 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) hw->fc.requested_mode = ixgbe_fc_full; /* - * Set up the 1G flow control advertisement registers so the HW will be - * able to do fc autoneg once the cable is plugged in. If we end up - * using 10g instead, this is harmless. + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. */ - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_backplane: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); + break; + + case ixgbe_media_type_copper: + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, &reg_cu); + break; + + default: + ; + } /* * The possible values of fc.requested_mode are: @@ -2034,6 +1986,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) case ixgbe_fc_none: /* Flow control completely disabled by software override.
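The refactor above routes the backplane, copper, and fiber autoneg paths through the shared ixgbe_negotiate_fc() resolver. As a rough standalone illustration (not part of the commit; the register words are reduced to booleans and the requested-mode check to a flag), the core intersection logic can be sketched as:

    #include <stdbool.h>

    /* Minimal sketch of the pause-bit resolution done by ixgbe_negotiate_fc();
     * inputs are the already-masked sym/asm bits from both sides. */
    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static enum fc_mode resolve_fc(bool adv_sym, bool adv_asm,
                                   bool lp_sym, bool lp_asm, bool want_full)
    {
            if (adv_sym && lp_sym)                  /* both sides symmetric */
                    return want_full ? FC_FULL : FC_RX_PAUSE;
            if (!adv_sym && adv_asm && lp_sym && lp_asm)
                    return FC_TX_PAUSE;             /* we advertised asym only */
            if (adv_sym && adv_asm && !lp_sym && lp_asm)
                    return FC_RX_PAUSE;             /* partner advertised asym only */
            return FC_NONE;
    }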
*/ reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); break; case ixgbe_fc_rx_pause: /* @@ -2045,6 +2002,11 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) * disable the adapter's ability to send PAUSE frames. */ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); break; case ixgbe_fc_tx_pause: /* @@ -2053,10 +2015,22 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) */ reg |= (IXGBE_PCS1GANA_ASM_PAUSE); reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); + } else if (hw->phy.media_type == ixgbe_media_type_copper) { + reg_cu |= (IXGBE_TAF_ASM_PAUSE); + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); + } break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); break; #ifdef CONFIG_DCB case ixgbe_fc_pfc: @@ -2070,80 +2044,37 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) break; } - IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); - reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - - /* Disable AN timeout */ - if (hw->fc.strict_ieee) - reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + if (hw->mac.type != ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); - IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); - hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; - /* - * Set up the 10G flow control advertisement registers so the HW - * can do fc autoneg once the cable is plugged in. If we end up - * using 1g instead, this is harmless. - */ - reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } /* - * The possible values of fc.requested_mode are: - * 0: Flow control is completely disabled - * 1: Rx flow control is enabled (we can receive pause frames, - * but not send pause frames). - * 2: Tx flow control is enabled (we can send pause frames but - * we do not support receiving pause frames). - * 3: Both Rx and Tx flow control (symmetric) are enabled. - * other: Invalid. + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. There is no need to set the PCS1GCTL register. + * */ - switch (hw->fc.requested_mode) { - case ixgbe_fc_none: - /* Flow control completely disabled by software override. */ - reg &= ~(IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); - break; - case ixgbe_fc_rx_pause: - /* - * Rx Flow control is enabled and Tx Flow control is - * disabled by software override. 
Since there really - * isn't a way to advertise that we are capable of RX - * Pause ONLY, we will advertise that we support both - * symmetric and asymmetric Rx PAUSE. Later, we will - * disable the adapter's ability to send PAUSE frames. - */ - reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); - break; - case ixgbe_fc_tx_pause: - /* - * Tx Flow control is enabled, and Rx Flow control is - * disabled by software override. - */ - reg |= (IXGBE_AUTOC_ASM_PAUSE); - reg &= ~(IXGBE_AUTOC_SYM_PAUSE); - break; - case ixgbe_fc_full: - /* Flow control (both Rx and Tx) is enabled by SW override. */ - reg |= (IXGBE_AUTOC_SYM_PAUSE | IXGBE_AUTOC_ASM_PAUSE); - break; -#ifdef CONFIG_DCB - case ixgbe_fc_pfc: - goto out; - break; -#endif /* CONFIG_DCB */ - default: - hw_dbg(hw, "Flow control param set incorrectly\n"); - ret_val = IXGBE_ERR_CONFIG; - goto out; - break; + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && + (ixgbe_device_supports_autoneg_fc(hw) == 0)) { + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, reg_cu); } - /* - * AUTOC restart handles negotiation of 1G and 10G. There is - * no need to set the PCS1GCTL register. - */ - reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg); - hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); + hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); out: return ret_val; } @@ -2159,10 +2090,16 @@ out: **/ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) { + struct ixgbe_adapter *adapter = hw->back; u32 i; u32 reg_val; u32 number_of_queues; - s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + s32 status = 0; + u16 dev_status = 0; + + /* Just jump out if bus mastering is already disabled */ + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto out; /* Disable the receive unit by stopping each queue */ number_of_queues = hw->mac.max_rx_queues; @@ -2179,13 +2116,43 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { - if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { - status = 0; + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto check_device_status; + udelay(100); + } + + hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ +check_device_status: + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS, + &dev_status); + if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) break; - } udelay(100); } + if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT) + hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); + else + goto out; + + /* + * Two consecutive resets are required via CTRL.RST per datasheet + * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine + * of this need. The first reset prevents new master requests from + * being issued by our device. We then must wait 1usec for any + * remaining completions from the PCIe bus to trickle in, and then reset + * again to clear out any effects they may have had on our device. 
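The reworked ixgbe_disable_pcie_master() above waits in two stages: first on the GIO master-disable status bit, then on the transaction-pending bit read from PCI config space, and only requests the CTRL.RST double reset when the latter never clears. A loose sketch of that two-stage wait follows; the device type and register helpers are hypothetical placeholders, not the driver's API.

    /* Hypothetical helpers (my_read_status, my_read_devstatus, struct my_dev)
     * used only to illustrate the two-stage drain-and-check pattern. */
    static bool wait_for_master_idle(struct my_dev *dev, unsigned int tries)
    {
            unsigned int i;

            for (i = 0; i < tries; i++) {           /* stage 1: GIO status bit */
                    if (!(my_read_status(dev) & MY_STATUS_GIO))
                            break;
                    udelay(100);
            }

            for (i = 0; i < tries; i++) {           /* stage 2: PCIe pending bit */
                    if (!(my_read_devstatus(dev) & MY_DEVSTATUS_PENDING))
                            return true;
                    udelay(100);
            }
            return false;   /* caller should schedule the double reset */
    }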
+ */ + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + +out: return status; } @@ -2195,7 +2162,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to acquire * - * Acquires the SWFW semaphore thought the GSSR register for the specified + * Acquires the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) @@ -2206,6 +2173,10 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) s32 timeout = 200; while (timeout) { + /* + * SW EEPROM semaphore bit is used for access to all + * SW_FW_SYNC/GSSR bits (not just EEPROM) + */ if (ixgbe_get_eeprom_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; @@ -2223,7 +2194,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) } if (!timeout) { - hw_dbg(hw, "Driver can't access resource, GSSR timeout.\n"); + hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n"); return IXGBE_ERR_SWFW_SYNC; } @@ -2239,7 +2210,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) * @hw: pointer to hardware structure * @mask: Mask to specify which semaphore to release * - * Releases the SWFW semaphore thought the GSSR register for the specified + * Releases the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) @@ -2427,37 +2398,38 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) u32 mpsar_lo, mpsar_hi; u32 rar_entries = hw->mac.num_rar_entries; - if (rar < rar_entries) { - mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } - if (!mpsar_lo && !mpsar_hi) - goto done; + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { - if (mpsar_lo) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - mpsar_lo = 0; - } - if (mpsar_hi) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); - mpsar_hi = 0; - } - } else if (vmdq < 32) { - mpsar_lo &= ~(1 << vmdq); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); - } else { - mpsar_hi &= ~(1 << (vmdq - 32)); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); - } + if (!mpsar_lo && !mpsar_hi) + goto done; - /* was that the last pool using this rar? */ - if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) - hw->mac.ops.clear_rar(hw, rar); + if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { + if (mpsar_lo) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + mpsar_lo = 0; + } + if (mpsar_hi) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + mpsar_hi = 0; + } + } else if (vmdq < 32) { + mpsar_lo &= ~(1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); } else { - hw_dbg(hw, "RAR index %d is out of range.\n", rar); + mpsar_hi &= ~(1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); } + /* was that the last pool using this rar? 
*/ + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + hw->mac.ops.clear_rar(hw, rar); done: return 0; } @@ -2473,18 +2445,20 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; - if (rar < rar_entries) { - if (vmdq < 32) { - mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= 1 << vmdq; - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); - } else { - mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= 1 << (vmdq - 32); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); - } - } else { + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { hw_dbg(hw, "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + if (vmdq < 32) { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar |= 1 << vmdq; + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); + } else { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + mpsar |= 1 << (vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); } return 0; } @@ -2497,7 +2471,6 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) { int i; - for (i = 0; i < 128; i++) IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); @@ -2726,12 +2699,21 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) * Reads the links register to determine if link is up and the current speed **/ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up, bool link_up_wait_to_complete) + bool *link_up, bool link_up_wait_to_complete) { - u32 links_reg; + u32 links_reg, links_orig; u32 i; + /* clear the old state */ + links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_orig != links_reg) { + hw_dbg(hw, "LINKS changed from %08X to %08X\n", + links_orig, links_reg); + } + if (link_up_wait_to_complete) { for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { if (links_reg & IXGBE_LINKS_UP) { @@ -2754,10 +2736,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, IXGBE_LINKS_SPEED_10G_82599) *speed = IXGBE_LINK_SPEED_10GB_FULL; else if ((links_reg & IXGBE_LINKS_SPEED_82599) == - IXGBE_LINKS_SPEED_1G_82599) + IXGBE_LINKS_SPEED_1G_82599) *speed = IXGBE_LINK_SPEED_1GB_FULL; - else + else if ((links_reg & IXGBE_LINKS_SPEED_82599) == + IXGBE_LINKS_SPEED_100_82599) *speed = IXGBE_LINK_SPEED_100_FULL; + else + *speed = IXGBE_LINK_SPEED_UNKNOWN; /* if link is down, zero out the current_mode */ if (*link_up == false) { @@ -2814,6 +2799,28 @@ wwn_prefix_out: } /** + * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. This + * function check the device id to see if the associated phy supports + * autoneg flow control. 
+ **/ +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + + switch (hw->device_id) { + case IXGBE_DEV_ID_X540T: + return 0; + case IXGBE_DEV_ID_82599_T3_LOM: + return 0; + default: + return IXGBE_ERR_FC_NOT_SUPPORTED; + } +} + +/** * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing * @hw: pointer to hardware structure * @enable: enable or disable switch for anti-spoofing diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 66ed045a8cf0..508f635fc2ca 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -29,6 +29,7 @@ #define _IXGBE_COMMON_H_ #include "ixgbe_type.h" +#include "ixgbe.h" u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); @@ -62,8 +63,6 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, struct net_device *netdev); -s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, - struct net_device *netdev); s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); @@ -110,9 +109,8 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) -extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw); #define hw_dbg(hw, format, arg...) \ - netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg) + netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg) #define e_dev_info(format, arg...) \ dev_info(&adapter->pdev->dev, format, ## arg) #define e_dev_warn(format, arg...) \ diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index d16c260c1f50..41c529fac0ab 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -34,6 +34,42 @@ #include "ixgbe_dcb_82599.h" /** + * ixgbe_ieee_credits - This calculates the ieee traffic class + * credits from the configured bandwidth percentages. Credits + * are the smallest unit programable into the underlying + * hardware. The IEEE 802.1Qaz specification do not use bandwidth + * groups so this is much simplified from the CEE case. 
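The ixgbe_ieee_credits() helper added here converts IEEE bandwidth percentages directly into refill/max credits, without CEE bandwidth groups. A userspace sketch of the same arithmetic is shown below; the quantum and credit ceilings are assumed constants for illustration and are not taken from this hunk.

    #include <stdio.h>

    #define QUANTUM          64     /* assumed DCB credit quantum        */
    #define MAX_REFILL       511    /* assumed refill-credit ceiling     */
    #define MAX_CREDIT_TOTAL 4095   /* assumed max-credit ceiling        */
    #define NTC              8

    int main(void)
    {
            unsigned char bw[NTC] = { 50, 30, 20, 0, 0, 0, 0, 0 };
            int max_frame = 1518, min_percent = 100, i;
            /* smallest credit that still fits half a max-sized frame */
            int min_credit = (max_frame / 2 + QUANTUM - 1) / QUANTUM;

            for (i = 0; i < NTC; i++)
                    if (bw[i] && bw[i] < min_percent)
                            min_percent = bw[i];

            int multiplier = min_credit / min_percent + 1;

            for (i = 0; i < NTC; i++) {
                    int refill = bw[i] * multiplier;
                    if (refill > MAX_REFILL)
                            refill = MAX_REFILL;
                    if (refill < min_credit)
                            refill = min_credit;
                    int max = bw[i] ? bw[i] * MAX_CREDIT_TOTAL / 100 : min_credit;
                    printf("tc%d refill=%d max=%d\n", i, refill, max);
            }
            return 0;
    }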
+ */ +s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) +{ + int min_percent = 100; + int min_credit, multiplier; + int i; + + min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / + DCB_CREDIT_QUANTUM; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + if (bw[i] < min_percent && bw[i]) + min_percent = bw[i]; + } + + multiplier = (min_credit / min_percent) + 1; + + /* Find out the hw credits for each TC */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); + + if (val < min_credit) + val = min_credit; + refill[i] = val; + + max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; + } + return 0; +} + +/** * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits * @ixgbe_dcb_config: Struct containing DCB settings. * @direction: Configuring either Tx or Rx. @@ -141,6 +177,59 @@ out: return ret_val; } +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) +{ + int i; + + *pfc_en = 0; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + *pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i; +} + +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, + u16 *refill) +{ + struct tc_bw_alloc *p; + int i; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &cfg->tc_config[i].path[direction]; + refill[i] = p->data_credits_refill; + } +} + +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) +{ + int i; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + max[i] = cfg->tc_config[i].desc_credits_max; +} + +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, + u8 *bwgid) +{ + struct tc_bw_alloc *p; + int i; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &cfg->tc_config[i].path[direction]; + bwgid[i] = p->bwg_id; + } +} + +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, + u8 *ptype) +{ + struct tc_bw_alloc *p; + int i; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &cfg->tc_config[i].path[direction]; + ptype[i] = p->prio_type; + } +} + /** * ixgbe_dcb_hw_config - Config and enable DCB * @hw: pointer to hardware structure @@ -152,13 +241,32 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { s32 ret = 0; + u8 pfc_en; + u8 ptype[MAX_TRAFFIC_CLASS]; + u8 bwgid[MAX_TRAFFIC_CLASS]; + u16 refill[MAX_TRAFFIC_CLASS]; + u16 max[MAX_TRAFFIC_CLASS]; + /* CEE does not define a priority to tc mapping so map 1:1 */ + u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + /* Unpack CEE standard containers */ + ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); + ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(dcb_config, max); + ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); + ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); + switch (hw->mac.type) { case ixgbe_mac_82598EB: - ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); + ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg, + pfc_en, refill, max, bwgid, + ptype); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: - ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); + ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg, + pfc_en, refill, max, bwgid, + ptype, prio_tc); break; default: break; @@ -166,3 +274,49 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, return ret; } +/* Helper routines to abstract HW specifics from DCB netlink ops */ +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en) +{ + int ret = -EINVAL; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ret = 
ixgbe_dcb_config_pfc_82598(hw, pfc_en); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en); + break; + default: + break; + } + return ret; +} + +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, + u16 *refill, u16 *max, u8 *bwg_id, + u8 *prio_type, u8 *prio_tc) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, + prio_type); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, + bwg_id, prio_type, prio_tc); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); + break; + default: + break; + } + return 0; +} diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h index 1cfe38ee1644..944838fc7b59 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ixgbe/ixgbe_dcb.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -139,7 +139,6 @@ struct ixgbe_dcb_config { struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ bool pfc_mode_enable; - bool round_robin_enable; enum dcb_rx_pba_cfg rx_pba_cfg; @@ -148,12 +147,21 @@ struct ixgbe_dcb_config { }; /* DCB driver APIs */ +void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); +void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); +void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); +void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); +void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); /* DCB credits calculation */ +s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame); s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, struct ixgbe_dcb_config *, int, u8); /* DCB hw initialization */ +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *prio_type, u8 *tc_prio); +s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en); s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); /* DCB definitions for credit calculation */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index 9a5e89c12e05..1bc57e52cee3 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -38,15 +38,14 @@ * * Configure packet buffers for DCB mode. 
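The new hw_pfc_config/hw_ets_config wrappers above let the dcbnl code program PFC from a plain per-TC bitmask instead of the full CEE config struct. A simplified equivalent of the unpack step (illustrative helper, plain ints in place of the driver's enums) shows how that mask is formed before being handed to ixgbe_dcb_hw_pfc_config():

    /* Collapse per-TC PFC enables into one bitmask; e.g. PFC on TC0 and TC3
     * yields 0x09. Example code only, not part of the commit. */
    static unsigned char example_pfc_mask(const int tc_pfc_enabled[8])
    {
            unsigned char pfc_en = 0;
            int i;

            for (i = 0; i < 8; i++)
                    if (tc_pfc_enabled[i])
                            pfc_en |= 1 << i;
            return pfc_en;
    }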
*/ -static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba) { s32 ret_val = 0; u32 value = IXGBE_RXPBSIZE_64KB; u8 i = 0; /* Setup Rx packet buffer sizes */ - switch (dcb_config->rx_pba_cfg) { + switch (rx_pba) { case pba_80_48: /* Setup the first four at 80KB */ value = IXGBE_RXPBSIZE_80KB; @@ -78,10 +77,11 @@ static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, * * Configure Rx Data Arbiter and credits for each traffic class. */ -static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *prio_type) { - struct tc_bw_alloc *p; u32 reg = 0; u32 credit_refill = 0; u32 credit_max = 0; @@ -102,13 +102,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; - credit_refill = p->data_credits_refill; - credit_max = p->data_credits_max; + credit_refill = refill[i]; + credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); - if (p->prio_type == prio_link) + if (prio_type[i] == prio_link) reg |= IXGBE_RT2CR_LSP; IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); @@ -135,10 +134,12 @@ static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, * * Configure Tx Descriptor Arbiter and credits for each traffic class. */ -static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) { - struct tc_bw_alloc *p; u32 reg, max_credits; u8 i; @@ -146,10 +147,8 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, /* Enable arbiter */ reg &= ~IXGBE_DPMCS_ARBDIS; - if (!(dcb_config->round_robin_enable)) { - /* Enable DFP and Recycle mode */ - reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM); - } + /* Enable DFP and Recycle mode */ + reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM); reg |= IXGBE_DPMCS_TSOEF; /* Configure Max TSO packet size 34KB including payload and headers */ reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT); @@ -158,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; - max_credits = dcb_config->tc_config[i].desc_credits_max; + max_credits = max[i]; reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; - reg |= p->data_credits_refill; - reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; - if (p->prio_type == prio_group) + if (prio_type[i] == prio_group) reg |= IXGBE_TDTQ2TCCR_GSP; - if (p->prio_type == prio_link) + if (prio_type[i] == prio_link) reg |= IXGBE_TDTQ2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); @@ -183,10 +181,12 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, * * Configure Tx Data Arbiter and credits for each traffic class. 
*/ -static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) { - struct tc_bw_alloc *p; u32 reg; u8 i; @@ -200,15 +200,14 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; - reg = p->data_credits_refill; - reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT; - reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT; + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; - if (p->prio_type == prio_group) + if (prio_type[i] == prio_group) reg |= IXGBE_TDPT2TCCR_GSP; - if (p->prio_type == prio_link) + if (prio_type[i] == prio_link) reg |= IXGBE_TDPT2TCCR_LSP; IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); @@ -229,59 +228,57 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, * * Configure Priority Flow Control for each traffic class. */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) { u32 reg, rx_pba_size; u8 i; - if (!dcb_config->pfc_mode_enable) - goto out; - - /* Enable Transmit Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - reg &= ~IXGBE_RMCS_TFCE_802_3X; - /* correct the reporting of our flow control status */ - reg |= IXGBE_RMCS_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); - - /* Enable Receive Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - reg &= ~IXGBE_FCTRL_RFCE; - reg |= IXGBE_FCTRL_RPFCE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + if (pfc_en) { + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + reg &= ~IXGBE_RMCS_TFCE_802_3X; + /* correct the reporting of our flow control status */ + reg |= IXGBE_RMCS_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg &= ~IXGBE_FCTRL_RFCE; + reg |= IXGBE_FCTRL_RPFCE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + + /* Configure pause time */ + for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); + } /* * Configure flow control thresholds and enable priority flow control * for each traffic class. 
*/ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + int enabled = pfc_en & (1 << i); rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; reg = (rx_pba_size - hw->fc.low_water) << 10; - if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || - dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) + if (enabled == pfc_enabled_tx || + enabled == pfc_enabled_full) reg |= IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); reg = (rx_pba_size - hw->fc.high_water) << 10; - if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || - dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) + if (enabled == pfc_enabled_tx || + enabled == pfc_enabled_full) reg |= IXGBE_FCRTH_FCEN; IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); } - /* Configure pause time */ - for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); - -out: return 0; } @@ -292,7 +289,7 @@ out: * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. */ -static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) { u32 reg = 0; u8 i = 0; @@ -325,13 +322,16 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) * Configure dcb settings and enable dcb mode. */ s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) + u8 rx_pba, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type) { - ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config); - ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config); - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config); - ixgbe_dcb_config_pfc_82598(hw, dcb_config); + ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba); + ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_pfc_82598(hw, pfc_en); ixgbe_dcb_config_tc_stats_82598(hw); return 0; diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h index abc03ccfa088..1e9750c2b46b 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -71,9 +71,28 @@ /* DCB hardware-specific driver APIs */ /* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); /* DCB hw initialization */ -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *); +s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, + u8 rx_pba, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type); #endif /* _DCB_82598_CONFIG_H */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 374e1f74d0f5..025af8c53ddb 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -33,19 +33,18 @@ /** * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @rx_pba: method to distribute packet buffer * * Configure packet buffers for DCB mode. */ -static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba) { s32 ret_val = 0; u32 value = IXGBE_RXPBSIZE_64KB; u8 i = 0; /* Setup Rx packet buffer sizes */ - switch (dcb_config->rx_pba_cfg) { + switch (rx_pba) { case pba_80_48: /* Setup the first four at 80KB */ value = IXGBE_RXPBSIZE_80KB; @@ -75,14 +74,20 @@ static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, /** * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class * * Configure Rx Packet Arbiter and credits for each traffic class. 
*/ -static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc) { - struct tc_bw_alloc *p; u32 reg = 0; u32 credit_refill = 0; u32 credit_max = 0; @@ -98,20 +103,18 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, /* Map all traffic classes to their UP, 1 to 1 */ reg = 0; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG]; - - credit_refill = p->data_credits_refill; - credit_max = p->data_credits_max; + credit_refill = refill[i]; + credit_max = max[i]; reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); - reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; - if (p->prio_type == prio_link) + if (prio_type[i] == prio_link) reg |= IXGBE_RTRPT4C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); @@ -130,14 +133,19 @@ static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, /** * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class * * Configure Tx Descriptor Arbiter and credits for each traffic class. */ -static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type) { - struct tc_bw_alloc *p; u32 reg, max_credits; u8 i; @@ -149,16 +157,15 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; - max_credits = dcb_config->tc_config[i].desc_credits_max; + max_credits = max[i]; reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; - reg |= p->data_credits_refill; - reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; - if (p->prio_type == prio_group) + if (prio_type[i] == prio_group) reg |= IXGBE_RTTDT2C_GSP; - if (p->prio_type == prio_link) + if (prio_type[i] == prio_link) reg |= IXGBE_RTTDT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); @@ -177,14 +184,20 @@ static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, /** * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class * * Configure Tx Packet Arbiter and credits for each traffic class. 
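Each arbiter loop above now builds its per-TC register word from the unpacked arrays rather than from the CEE config struct: refill credits in the low bits, max credits at the MCL shift, the bandwidth-group id at the BWG shift, and a GSP or LSP flag chosen from the priority type. A small sketch of that composition for the 82599 Tx descriptor arbiter follows; the macros are the ones used in the hunk, the helper itself is illustrative only.

    /* Illustrative only: compose one RTTDT2C word the way the loop above does. */
    static u32 tc_tx_desc_arbiter_word(u16 refill, u16 max, u8 bwg_id, u8 prio_type)
    {
            u32 reg = (u32)max << IXGBE_RTTDT2C_MCL_SHIFT;

            reg |= refill;
            reg |= (u32)bwg_id << IXGBE_RTTDT2C_BWG_SHIFT;
            if (prio_type == prio_group)
                    reg |= IXGBE_RTTDT2C_GSP;
            if (prio_type == prio_link)
                    reg |= IXGBE_RTTDT2C_LSP;
            return reg;
    }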
*/ -static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc) { - struct tc_bw_alloc *p; u32 reg; u8 i; @@ -200,20 +213,19 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, /* Map all traffic classes to their UP, 1 to 1 */ reg = 0; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); /* Configure traffic class credits and priority */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG]; - reg = p->data_credits_refill; - reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT; - reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT; + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; - if (p->prio_type == prio_group) + if (prio_type[i] == prio_group) reg |= IXGBE_RTTPT2C_GSP; - if (p->prio_type == prio_link) + if (prio_type[i] == prio_link) reg |= IXGBE_RTTPT2C_LSP; IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); @@ -233,63 +245,59 @@ static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, /** * ixgbe_dcb_config_pfc_82599 - Configure priority flow control * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @pfc_en: enabled pfc bitmask * * Configure Priority Flow Control (PFC) for each traffic class. */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en) { u32 i, reg, rx_pba_size; - /* If PFC is disabled globally then fall back to LFC. 
*/ - if (!dcb_config->pfc_mode_enable) { - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - hw->mac.ops.fc_enable(hw, i); - goto out; - } - /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + int enabled = pfc_en & (1 << i); rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT; reg = (rx_pba_size - hw->fc.low_water) << 10; - if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || - dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) + if (enabled) reg |= IXGBE_FCRTL_XONE; IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); reg = (rx_pba_size - hw->fc.high_water) << 10; - if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || - dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) + if (enabled) reg |= IXGBE_FCRTH_FCEN; IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); } - /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time | (hw->fc.pause_time << 16); - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - - /* Enable Transmit PFC */ - reg = IXGBE_FCCFG_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); + if (pfc_en) { + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + + reg = IXGBE_FCCFG_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); + /* + * Enable Receive PFC + * We will always honor XOFF frames we receive when + * we are in PFC mode. + */ + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + reg &= ~IXGBE_MFLCN_RFCE; + reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + + } else { + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + hw->mac.ops.fc_enable(hw, i); + } - /* - * Enable Receive PFC - * We will always honor XOFF frames we receive when - * we are in PFC mode. - */ - reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - reg &= ~IXGBE_MFLCN_RFCE; - reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; - IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); -out: return 0; } @@ -349,7 +357,6 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) /** * ixgbe_dcb_config_82599 - Configure general DCB parameters * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure * * Configure general DCB parameters. */ @@ -406,19 +413,28 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw) /** * ixgbe_dcb_hw_config_82599 - Configure and enable DCB * @hw: pointer to hardware structure - * @dcb_config: pointer to ixgbe_dcb_config structure + * @rx_pba: method to distribute packet buffer + * @refill: refill credits index by traffic class + * @max: max credits index by traffic class + * @bwg_id: bandwidth grouping indexed by traffic class + * @prio_type: priority type indexed by traffic class + * @pfc_en: enabled pfc bitmask * * Configure dcb settings and enable dcb mode. 
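In the PFC path above, the per-TC XON/XOFF thresholds are derived from the packet buffer size (in kilobytes after the RXPBSIZE shift) minus the low/high watermarks in the same units, converted back to bytes with the << 10, and the enable bit is set only for classes whose pfc_en bit is set. A hedged sketch of that threshold math (helper name is illustrative, register flag macros are the driver's):

    /* Illustrative only: FCRTL/FCRTH words as composed per TC in the loop above. */
    static void pfc_tc_thresholds(u32 rx_pba_kb, u32 low_water, u32 high_water,
                                  bool enabled, u32 *fcrtl, u32 *fcrth)
    {
            *fcrtl = (rx_pba_kb - low_water) << 10;
            *fcrth = (rx_pba_kb - high_water) << 10;
            if (enabled) {
                    *fcrtl |= IXGBE_FCRTL_XONE;     /* XON threshold enable  */
                    *fcrth |= IXGBE_FCRTH_FCEN;     /* XOFF threshold enable */
            }
    }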
*/ s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) + u8 rx_pba, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { - ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config); + ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba); ixgbe_dcb_config_82599(hw); - ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config); - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config); - ixgbe_dcb_config_pfc_82599(hw, dcb_config); + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, + bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, + bwg_id, prio_type, prio_tc); + ixgbe_dcb_config_pfc_82599(hw, pfc_en); ixgbe_dcb_config_tc_stats_82599(hw); return 0; diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h index 3841649fb954..148fd8b477a9 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -102,11 +102,32 @@ /* DCB hardware-specific driver APIs */ /* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config); +s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en); /* DCB hw initialization */ +s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc); + +s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type); + +s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + u16 *refill, + u16 *max, + u8 *bwg_id, + u8 *prio_type, + u8 *prio_tc); + s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *config); + u8 rx_pba, u8 pfc_en, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, + u8 *prio_tc); #endif /* _DCB_82599_CONFIG_H */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index bf566e8a455e..fec4c724c37a 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -37,7 +37,6 @@ #define BIT_PG_RX 0x04 #define BIT_PG_TX 0x08 #define BIT_APP_UPCHG 0x10 -#define BIT_RESETLINK 0x40 #define BIT_LINKSPEED 0x80 /* Responses for the DCB_C_SET_ALL command */ @@ -130,7 +129,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: adapter->last_lfc_mode = adapter->hw.fc.current_mode; @@ -146,6 +144,9 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) } adapter->flags |= IXGBE_FLAG_DCB_ENABLED; + if (!netdev_get_num_tc(netdev)) + ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS); + ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); @@ -160,7 +161,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags |= IXGBE_FLAG_RSS_ENABLED; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: @@ -170,6 +170,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) break; } + ixgbe_setup_tc(netdev, 0); + ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); @@ -225,10 +227,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != - adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) { + adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) adapter->dcb_set_bitmap |= BIT_PG_TX; - adapter->dcb_set_bitmap |= BIT_RESETLINK; - } } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, @@ -239,10 +239,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != - adapter->dcb_cfg.bw_percentage[0][bwg_id]) { + adapter->dcb_cfg.bw_percentage[0][bwg_id]) adapter->dcb_set_bitmap |= BIT_PG_TX; - adapter->dcb_set_bitmap |= BIT_RESETLINK; - } } static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, @@ -269,10 +267,8 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != - adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) { + adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) adapter->dcb_set_bitmap |= BIT_PG_RX; - adapter->dcb_set_bitmap |= BIT_RESETLINK; - } } static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, @@ -283,10 +279,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != - adapter->dcb_cfg.bw_percentage[1][bwg_id]) { + adapter->dcb_cfg.bw_percentage[1][bwg_id]) adapter->dcb_set_bitmap |= BIT_PG_RX; - adapter->dcb_set_bitmap |= BIT_RESETLINK; - } } static void 
ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, @@ -355,31 +349,28 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); int ret; - if (!adapter->dcb_set_bitmap) + if (!adapter->dcb_set_bitmap || + !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return DCB_NO_HW_CHG; ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, - adapter->ring_feature[RING_F_DCB].indices); + MAX_TRAFFIC_CLASS); if (ret) return DCB_NO_HW_CHG; /* - * Only take down the adapter if the configuration change - * requires a reset. + * Only take down the adapter if an app change occured. FCoE + * may shuffle tx rings in this case and this can not be done + * without a reset currently. */ - if (adapter->dcb_set_bitmap & BIT_RESETLINK) { + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) msleep(1); - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - if (netif_running(netdev)) - netdev->netdev_ops->ndo_stop(netdev); - ixgbe_clear_interrupt_scheme(adapter); - } else { - if (netif_running(netdev)) - ixgbe_down(adapter); - } + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + ixgbe_clear_interrupt_scheme(adapter); } if (adapter->dcb_cfg.pfc_mode_enable) { @@ -408,29 +399,53 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) } } - if (adapter->dcb_set_bitmap & BIT_RESETLINK) { - if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { - ixgbe_init_interrupt_scheme(adapter); - if (netif_running(netdev)) - netdev->netdev_ops->ndo_open(netdev); - } else { - if (netif_running(netdev)) - ixgbe_up(adapter); - } + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + ixgbe_init_interrupt_scheme(adapter); + if (netif_running(netdev)) + netdev->netdev_ops->ndo_open(netdev); ret = DCB_HW_CHG_RST; - } else if (adapter->dcb_set_bitmap & BIT_PFC) { - if (adapter->hw.mac.type == ixgbe_mac_82598EB) - ixgbe_dcb_config_pfc_82598(&adapter->hw, - &adapter->dcb_cfg); - else if (adapter->hw.mac.type == ixgbe_mac_82599EB) - ixgbe_dcb_config_pfc_82599(&adapter->hw, - &adapter->dcb_cfg); + } + + if (adapter->dcb_set_bitmap & BIT_PFC) { + u8 pfc_en; + ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en); + ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en); ret = DCB_HW_CHG; } + + if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { + u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; + u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; + /* Priority to TC mapping in CEE case default to 1:1 */ + u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + +#ifdef CONFIG_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, + max_frame, DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, + max_frame, DCB_RX_CONFIG); + + ixgbe_dcb_unpack_refill(&adapter->dcb_cfg, + DCB_TX_CONFIG, refill); + ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max); + ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg, + DCB_TX_CONFIG, bwg_id); + ixgbe_dcb_unpack_prio(&adapter->dcb_cfg, + DCB_TX_CONFIG, prio_type); + + ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, + bwg_id, prio_type, prio_tc); + } + if (adapter->dcb_cfg.pfc_mode_enable) adapter->hw.fc.current_mode = ixgbe_fc_pfc; - if (adapter->dcb_set_bitmap & BIT_RESETLINK) + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) clear_bit(__IXGBE_RESETTING, 
&adapter->state); adapter->dcb_set_bitmap = 0x00; return ret; @@ -439,40 +454,38 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 rval = 0; - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - switch (capid) { - case DCB_CAP_ATTR_PG: - *cap = true; - break; - case DCB_CAP_ATTR_PFC: - *cap = true; - break; - case DCB_CAP_ATTR_UP2TC: - *cap = false; - break; - case DCB_CAP_ATTR_PG_TCS: - *cap = 0x80; - break; - case DCB_CAP_ATTR_PFC_TCS: - *cap = 0x80; - break; - case DCB_CAP_ATTR_GSP: - *cap = true; - break; - case DCB_CAP_ATTR_BCN: - *cap = false; - break; - default: - rval = -EINVAL; - break; - } - } else { - rval = -EINVAL; + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcbx_cap; + break; + default: + *cap = false; + break; } - return rval; + return 0; } static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) @@ -533,21 +546,16 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) */ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { - u8 rval = 0; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; - switch (idtype) { - case DCB_APP_IDTYPE_ETHTYPE: -#ifdef IXGBE_FCOE - if (id == ETH_P_FCOE) - rval = ixgbe_fcoe_getapp(netdev_priv(netdev)); -#endif - break; - case DCB_APP_IDTYPE_PORTNUM: - break; - default: - break; - } - return rval; + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 0; + + return dcb_getapp(netdev, &app); } /** @@ -562,24 +570,45 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 id, u8 up) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); u8 rval = 1; + struct dcb_app app = { + .selector = idtype, + .protocol = id, + .priority = up + }; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return rval; + + rval = dcb_setapp(netdev, &app); switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: #ifdef IXGBE_FCOE if (id == ETH_P_FCOE) { - u8 tc; - struct ixgbe_adapter *adapter; + u8 old_tc; - adapter = netdev_priv(netdev); - tc = adapter->fcoe.tc; + /* Get current programmed tc */ + old_tc = adapter->fcoe.tc; rval = ixgbe_fcoe_setapp(adapter, up); - if ((!rval) && (tc != adapter->fcoe.tc) && - (adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { + + if (rval || + !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || + !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + break; + + /* The FCoE application priority may be changed multiple + * times in quick sucession with switches that build up + * TLVs. 
To avoid creating uneeded device resets this + * checks the actual HW configuration and clears + * BIT_APP_UPCHG if a HW configuration change is not + * need + */ + if (old_tc == adapter->fcoe.tc) + adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG; + else adapter->dcb_set_bitmap |= BIT_APP_UPCHG; - adapter->dcb_set_bitmap |= BIT_RESETLINK; - } } #endif break; @@ -591,7 +620,204 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, return rval; } +static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; + + /* No IEEE PFC settings available */ + if (!my_ets) + return -EINVAL; + + ets->ets_cap = MAX_TRAFFIC_CLASS; + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + return 0; +} + +static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; + int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i, err; + __u64 *p = (__u64 *) ets->prio_tc; + /* naively give each TC a bwg to map onto CEE hardware */ + __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->ixgbe_ieee_ets) { + adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), + GFP_KERNEL); + if (!adapter->ixgbe_ieee_ets) + return -ENOMEM; + } + + memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + + /* Map TSA onto CEE prio type */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[i] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[i] = 0; + break; + default: + /* Hardware only supports priority strict or + * ETS transmission selection algorithms if + * we receive some other value from dcbnl + * throw an error + */ + return -EINVAL; + } + } + + if (*p) + ixgbe_dcbnl_set_state(dev, 1); + else + ixgbe_dcbnl_set_state(dev, 0); + + ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); + err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, + bwg_id, prio_type, ets->prio_tc); + return err; +} + +static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; + int i; + + /* No IEEE PFC settings available */ + if (!my_pfc) + return -EINVAL; + + pfc->pfc_cap = MAX_TRAFFIC_CLASS; + pfc->pfc_en = my_pfc->pfc_en; + pfc->mbc = my_pfc->mbc; + pfc->delay = my_pfc->delay; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = adapter->stats.pxoffrxc[i]; + pfc->indications[i] = adapter->stats.pxofftxc[i]; + } + + return 0; +} + +static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + int err; + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + if (!adapter->ixgbe_ieee_pfc) { + adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), + GFP_KERNEL); + if (!adapter->ixgbe_ieee_pfc) + return -ENOMEM; + } + + memcpy(adapter->ixgbe_ieee_pfc, pfc, 
sizeof(*adapter->ixgbe_ieee_pfc)); + err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en); + return err; +} + +static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; +#ifdef IXGBE_FCOE + if (app->selector == 1 && app->protocol == ETH_P_FCOE) { + if (adapter->fcoe.tc == app->priority) + goto setapp; + + /* In IEEE mode map up to tc 1:1 */ + adapter->fcoe.tc = app->priority; + adapter->fcoe.up = app->priority; + + /* Force hardware reset required to push FCoE + * setup on {tx|rx}_rings + */ + adapter->dcb_set_bitmap |= BIT_APP_UPCHG; + ixgbe_dcbnl_set_all(dev); + } + +setapp: +#endif + dcb_setapp(dev, app); + return 0; +} + +static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + return adapter->dcbx_cap; +} + +static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + if (mode == adapter->dcbx_cap) + return 0; + + adapter->dcbx_cap = mode; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX); + ixgbe_dcbnl_set_all(dev); + } else { + /* Drop into single TC mode strict priority as this + * indicates CEE and IEEE versions are disabled + */ + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + ixgbe_dcbnl_set_state(dev, 0); + } + + return 0; +} + const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = ixgbe_dcbnl_ieee_getets, + .ieee_setets = ixgbe_dcbnl_ieee_setets, + .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, + .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, + .ieee_setapp = ixgbe_dcbnl_ieee_setapp, .getstate = ixgbe_dcbnl_get_state, .setstate = ixgbe_dcbnl_set_state, .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, @@ -613,5 +839,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = { .setpfcstate = ixgbe_dcbnl_setpfcstate, .getapp = ixgbe_dcbnl_getapp, .setapp = ixgbe_dcbnl_setapp, + .getdcbx = ixgbe_dcbnl_getdcbx, + .setdcbx = ixgbe_dcbnl_setdcbx, }; - diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index 2002ea88ca2a..76380a2b35aa 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
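ixgbe_dcbnl_setdcbx() above accepts a new DCBX mode only if it is host-managed, is not LLD/firmware-managed, and does not request CEE and IEEE at the same time; it returns 1 to reject, per the dcbnl convention. A standalone version of that check, with the DCB_CAP_DCBX_* values as defined in include/linux/dcbnl.h:

#include <stdio.h>

#define DCB_CAP_DCBX_HOST        0x01
#define DCB_CAP_DCBX_LLD_MANAGED 0x02
#define DCB_CAP_DCBX_VER_CEE     0x04
#define DCB_CAP_DCBX_VER_IEEE    0x08

/* Returns 0 if the requested DCBX mode is acceptable, 1 otherwise,
 * mirroring the mode screen in ixgbe_dcbnl_setdcbx(). */
static int dcbx_mode_ok(unsigned char mode)
{
    if (mode & DCB_CAP_DCBX_LLD_MANAGED)
        return 1;    /* no firmware/LLD-managed operation */
    if ((mode & DCB_CAP_DCBX_VER_CEE) && (mode & DCB_CAP_DCBX_VER_IEEE))
        return 1;    /* CEE and IEEE are mutually exclusive */
    if (!(mode & DCB_CAP_DCBX_HOST))
        return 1;    /* host-managed mode is mandatory */
    return 0;
}

int main(void)
{
    printf("%d\n", dcbx_mode_ok(DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)); /* 0 */
    printf("%d\n", dcbx_mode_ok(DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE |
                                DCB_CAP_DCBX_VER_IEEE));                     /* 1 */
    return 0;
}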
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -152,20 +152,35 @@ static int ixgbe_get_settings(struct net_device *netdev, ecmd->supported |= (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg); + switch (hw->mac.type) { + case ixgbe_mac_X540: + ecmd->supported |= SUPPORTED_100baseT_Full; + break; + default: + break; + } + ecmd->advertising = ADVERTISED_Autoneg; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - /* - * It's possible that phy.autoneg_advertised may not be - * set yet. If so display what the default would be - - * both 1G and 10G supported. - */ - if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full | - ADVERTISED_10000baseT_Full))) + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_1GB_FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } else { + /* + * Default advertised modes in case + * phy.autoneg_advertised isn't set. + */ ecmd->advertising |= (ADVERTISED_10000baseT_Full | ADVERTISED_1000baseT_Full); + if (hw->mac.type == ixgbe_mac_X540) + ecmd->advertising |= ADVERTISED_100baseT_Full; + } if (hw->phy.media_type == ixgbe_media_type_copper) { ecmd->supported |= SUPPORTED_TP; @@ -271,8 +286,19 @@ static int ixgbe_get_settings(struct net_device *netdev, hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (link_up) { - ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? - SPEED_10000 : SPEED_1000; + switch (link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ecmd->speed = SPEED_10000; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ecmd->speed = SPEED_1000; + break; + case IXGBE_LINK_SPEED_100_FULL: + ecmd->speed = SPEED_100; + break; + default: + break; + } ecmd->duplex = DUPLEX_FULL; } else { ecmd->speed = -1; @@ -306,6 +332,9 @@ static int ixgbe_set_settings(struct net_device *netdev, if (ecmd->advertising & ADVERTISED_1000baseT_Full) advertised |= IXGBE_LINK_SPEED_1GB_FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + if (old == advertised) return err; /* this sets the link speed and restarts auto-neg */ diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index c54a88274d51..dba7d77588ef 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
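The ethtool changes above add 100 Mb/s as a reportable and advertisable speed on X540. Underneath, ixgbe_set_settings() just translates ethtool ADVERTISED_* bits into the device's autoneg_advertised mask, one bit per speed. A sketch of that translation with illustrative bit values (the real definitions live in ethtool.h and ixgbe_type.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only. */
#define ADV_100_FULL    (1u << 0)
#define ADV_1000_FULL   (1u << 1)
#define ADV_10000_FULL  (1u << 2)

#define LINK_SPEED_100_FULL   (1u << 0)
#define LINK_SPEED_1GB_FULL   (1u << 1)
#define LINK_SPEED_10GB_FULL  (1u << 2)

/* ethtool "advertising" mask -> device autoneg mask, the direction
 * ixgbe_set_settings() handles; get_settings does the reverse walk. */
static uint32_t adv_to_link_speed(uint32_t adv)
{
    uint32_t speed = 0;

    if (adv & ADV_10000_FULL)
        speed |= LINK_SPEED_10GB_FULL;
    if (adv & ADV_1000_FULL)
        speed |= LINK_SPEED_1GB_FULL;
    if (adv & ADV_100_FULL)
        speed |= LINK_SPEED_100_FULL;
    return speed;
}

int main(void)
{
    printf("0x%x\n", adv_to_link_speed(ADV_10000_FULL | ADV_100_FULL));
    return 0;
}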
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -135,22 +135,19 @@ out_ddp_put: return len; } + /** - * ixgbe_fcoe_ddp_get - called to set up ddp context + * ixgbe_fcoe_ddp_setup - called to set up ddp context * @netdev: the corresponding net_device * @xid: the exchange id requesting ddp * @sgl: the scatter-gather list for this request * @sgc: the number of scatter-gather items * - * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup - * and is expected to be called from ULD, e.g., FCP layer of libfc - * to set up ddp for the corresponding xid of the given sglist for - * the corresponding I/O. - * * Returns : 1 for success and 0 for no ddp */ -int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, - struct scatterlist *sgl, unsigned int sgc) +static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc, + int target_mode) { struct ixgbe_adapter *adapter; struct ixgbe_hw *hw; @@ -164,7 +161,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, unsigned int lastsize; unsigned int thisoff = 0; unsigned int thislen = 0; - u32 fcbuff, fcdmarw, fcfltrw; + u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; dma_addr_t addr = 0; if (!netdev || !sgl) @@ -275,6 +272,9 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); + /* Set WRCONTX bit to allow DDP for target */ + if (target_mode) + fcbuff |= (IXGBE_FCBUFF_WRCONTX); fcbuff |= (IXGBE_FCBUFF_VALID); fcdmarw = xid; @@ -287,6 +287,16 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, /* program DMA context */ hw = &adapter->hw; spin_lock_bh(&fcoe->lock); + + /* turn on last frame indication for target mode as FCP_RSPtarget is + * supposed to send FCP_RSP when it is done. */ + if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) { + set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode); + fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL); + fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH; + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); + } + IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); @@ -295,6 +305,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); + spin_unlock_bh(&fcoe->lock); return 1; @@ -309,6 +320,47 @@ out_noddp_unmap: } /** + * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. 
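ixgbe_fcoe_ddp_setup() above assembles the FCBUFF context word from the buffer size code, the user-buffer count, the offset into the first buffer and a valid flag, and additionally sets the write-context (WRCONTX) bit when the context is programmed for target-mode DDP. The packing, shown here with placeholder shift and flag values (the real IXGBE_FCBUFF_* definitions come from ixgbe_type.h and differ), looks like:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field positions, for illustration only. */
#define FCBUFF_BUFFSIZE_SHIFT 3
#define FCBUFF_BUFFCNT_SHIFT  8
#define FCBUFF_OFFSET_SHIFT   16
#define FCBUFF_WRCONTX        (1u << 1)
#define FCBUFF_VALID          (1u << 0)
#define FCBUFF_4KB            0x3

/* Shape of the FCBUFF assembly in ixgbe_fcoe_ddp_setup(): size code,
 * buffer count, first-buffer offset, optional WRCONTX, then VALID. */
static uint32_t fcbuff_pack(unsigned bufcnt, unsigned firstoff, int target_mode)
{
    uint32_t fcbuff = FCBUFF_4KB << FCBUFF_BUFFSIZE_SHIFT;

    fcbuff |= (bufcnt & 0xff) << FCBUFF_BUFFCNT_SHIFT;
    fcbuff |= firstoff << FCBUFF_OFFSET_SHIFT;
    if (target_mode)
        fcbuff |= FCBUFF_WRCONTX;
    fcbuff |= FCBUFF_VALID;
    return fcbuff;
}

int main(void)
{
    printf("0x%08x\n", fcbuff_pack(8, 0, 1));
    return 0;
}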
+ * + * Returns : 1 for success and 0 for no ddp + */ +int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); +} + +/** + * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device + * @xid: the exchange id requesting ddp + * @sgl: the scatter-gather list for this request + * @sgc: the number of scatter-gather items + * + * This is the implementation of net_device_ops.ndo_fcoe_ddp_target + * and is expected to be called from ULD, e.g., FCP layer of libfc + * to set up ddp for the corresponding xid of the given sglist for + * the corresponding I/O. The DDP in target mode is a write I/O request + * from the initiator. + * + * Returns : 1 for success and 0 for no ddp + */ +int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + struct scatterlist *sgl, unsigned int sgc) +{ + return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); +} + +/** * ixgbe_fcoe_ddp - check ddp status and mark it done * @adapter: ixgbe adapter * @rx_desc: advanced rx descriptor @@ -331,6 +383,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, struct ixgbe_fcoe *fcoe; struct ixgbe_fcoe_ddp *ddp; struct fc_frame_header *fh; + struct fcoe_crc_eof *crc; if (!ixgbe_rx_is_fcoe(rx_desc)) goto ddp_out; @@ -384,7 +437,18 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, else if (ddp->len) rc = ddp->len; } - + /* In target mode, check the last data frame of the sequence. + * For DDP in target mode, data is already DDPed but the header + * indication of the last data frame ould allow is to tell if we + * got all the data and the ULP can send FCP_RSP back, as this is + * not a full fcoe frame, we fill the trailer here so it won't be + * dropped by the ULP stack. + */ + if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && + (fctl & FC_FC_END_SEQ)) { + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; + } ddp_out: return rc; } @@ -749,21 +813,6 @@ out_disable: #ifdef CONFIG_IXGBE_DCB /** - * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE - * @adapter : ixgbe adapter - * - * Finds out the corresponding user priority bitmap from the current - * traffic class that FCoE belongs to. Returns 0 as the invalid user - * priority bitmap to indicate an error. - * - * Returns : 802.1p user priority bitmap for FCoE - */ -u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter) -{ - return 1 << adapter->fcoe.up; -} - -/** * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE * @adapter : ixgbe adapter * @up : 802.1p user priority bitmap @@ -840,5 +889,3 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) } return rc; } - - diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 65cc8fb14fe7..5a650a4ace66 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
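The deleted ixgbe_fcoe_getapp() reported the FCoE priority as a one-hot bitmap (1 << adapter->fcoe.up), whereas the dcb_getapp() path used above works with the plain priority index. The two forms convert trivially, as this small sketch shows:

#include <stdint.h>
#include <stdio.h>

/* 802.1p user priority (0..7) -> one-hot bitmap, the form the removed
 * ixgbe_fcoe_getapp() returned. */
static uint8_t up_to_bitmap(unsigned up)
{
    return (uint8_t)(1u << (up & 0x7));
}

/* One-hot bitmap -> priority index; the lowest set bit wins. */
static int bitmap_to_up(uint8_t map)
{
    for (int up = 0; up < 8; up++)
        if (map & (1u << up))
            return up;
    return -1;    /* empty bitmap: no priority configured */
}

int main(void)
{
    printf("%#x %d\n", up_to_bitmap(3), bitmap_to_up(0x08));    /* 0x8 3 */
    return 0;
}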
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -52,6 +52,9 @@ /* fcerr */ #define IXGBE_FCERR_BADCRC 0x00100000 +/* FCoE DDP for target mode */ +#define __IXGBE_FCOE_TARGET 1 + struct ixgbe_fcoe_ddp { int len; u32 err; @@ -66,6 +69,7 @@ struct ixgbe_fcoe { u8 tc; u8 up; #endif + unsigned long mode; atomic_t refcnt; spinlock_t lock; struct pci_pool *pool; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 30f9ccfb4f87..f17e4a7ee731 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -54,7 +54,8 @@ static const char ixgbe_driver_string[] = #define DRV_VERSION "3.2.9-k2" const char ixgbe_driver_version[] = DRV_VERSION; -static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; +static const char ixgbe_copyright[] = + "Copyright (c) 1999-2011 Intel Corporation."; static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, @@ -648,10 +649,10 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring, * * Returns : a tc index for use in range 0-7, or 0-3 */ -u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) +static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) { int tc = -1; - int dcb_i = adapter->ring_feature[RING_F_DCB].indices; + int dcb_i = netdev_get_num_tc(adapter->netdev); /* if DCB is not enabled the queues have no TC */ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) @@ -2597,6 +2598,11 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) i--; for (; i >= 0; i--) { + /* free only the irqs that were actually requested */ + if (!adapter->q_vector[i]->rxr_count && + !adapter->q_vector[i]->txr_count) + continue; + free_irq(adapter->msix_entries[i].vector, adapter->q_vector[i]); } @@ -2886,17 +2892,20 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ); switch (mask) { +#ifdef CONFIG_IXGBE_DCB + case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED): + mrqc = IXGBE_MRQC_RTRSS8TCEN; + break; + case (IXGBE_FLAG_DCB_ENABLED): + mrqc = IXGBE_MRQC_RT8TCEN; + break; +#endif /* CONFIG_IXGBE_DCB */ case (IXGBE_FLAG_RSS_ENABLED): mrqc = IXGBE_MRQC_RSSEN; break; case (IXGBE_FLAG_SRIOV_ENABLED): mrqc = IXGBE_MRQC_VMDQEN; break; -#ifdef CONFIG_IXGBE_DCB - case (IXGBE_FLAG_DCB_ENABLED): - mrqc = IXGBE_MRQC_RT8TCEN; - break; -#endif /* CONFIG_IXGBE_DCB */ default: break; } @@ -3077,6 +3086,14 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, ixgbe_configure_srrctl(adapter, ring); ixgbe_configure_rscctl(adapter, ring); + /* If operating in IOV mode set RLPML for X540 */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + hw->mac.type == ixgbe_mac_X540) { + rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; + rxdctl |= ((ring->netdev->mtu + ETH_HLEN + + ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN); + } + if (hw->mac.type == ixgbe_mac_82598EB) { /* * enable cache line friendly hardware writes: @@ -3641,15 +3658,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82598EB) netif_set_gso_max_size(adapter->netdev, 32768); -#ifdef 
CONFIG_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) - max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif - - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_RX_CONFIG); /* Enable VLAN tag insert/strip */ adapter->netdev->features |= NETIF_F_HW_VLAN_RX; @@ -3657,7 +3665,43 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); /* reconfigure the hardware */ - ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) { +#ifdef CONFIG_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_RX_CONFIG); + ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + } else { + struct net_device *dev = adapter->netdev; + + if (adapter->ixgbe_ieee_ets) + dev->dcbnl_ops->ieee_setets(dev, + adapter->ixgbe_ieee_ets); + if (adapter->ixgbe_ieee_pfc) + dev->dcbnl_ops->ieee_setpfc(dev, + adapter->ixgbe_ieee_pfc); + } + + /* Enable RSS Hash per TC */ + if (hw->mac.type != ixgbe_mac_82598EB) { + int i; + u32 reg = 0; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + u8 msb = 0; + u8 cnt = adapter->netdev->tc_to_txq[i].count; + + while (cnt >>= 1) + msb++; + + reg |= msb << IXGBE_RQTC_SHIFT_TC(i); + } + IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); + } } #endif @@ -3761,7 +3805,8 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) if (ret) goto link_cfg_out; - if (hw->mac.ops.get_link_capabilities) + autoneg = hw->phy.autoneg_advertised; + if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation); if (ret) @@ -3876,7 +3921,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter) * If we're not hot-pluggable SFP+, we just need to configure link * and bring it up. 
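The "Enable RSS Hash per TC" hunk above computes, for each traffic class, how many bits are needed to index its queues (the shift loop is floor(log2(count))) and packs one small field per TC into the RQTC register. A sketch of that computation; the 4-bit field width is illustrative, with the real placement coming from IXGBE_RQTC_SHIFT_TC() in ixgbe_type.h:

#include <stdint.h>
#include <stdio.h>

/* floor(log2(count)), via the same shift loop the RQTC hunk uses. */
static unsigned msb_of(unsigned count)
{
    unsigned msb = 0;

    while (count >>= 1)
        msb++;
    return msb;
}

/* Pack one per-TC field into an RQTC-style register. */
static uint32_t pack_rqtc(const unsigned queues_per_tc[8])
{
    uint32_t reg = 0;

    for (unsigned tc = 0; tc < 8; tc++)
        reg |= msb_of(queues_per_tc[tc]) << (tc * 4);
    return reg;
}

int main(void)
{
    unsigned q[8] = { 4, 4, 4, 4, 2, 2, 1, 1 };

    printf("0x%08x\n", pack_rqtc(q));    /* 2 bits for 4 queues, 1 for 2, 0 for 1 */
    return 0;
}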
*/ - if (hw->phy.type == ixgbe_phy_unknown) + if (hw->phy.type == ixgbe_phy_none) schedule_work(&adapter->sfp_config_module_task); /* enable transmits */ @@ -4243,24 +4288,6 @@ static void ixgbe_reset_task(struct work_struct *work) ixgbe_reinit_locked(adapter); } -#ifdef CONFIG_IXGBE_DCB -static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) -{ - bool ret = false; - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; - - if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return ret; - - f->mask = 0x7 << 3; - adapter->num_rx_queues = f->indices; - adapter->num_tx_queues = f->indices; - ret = true; - - return ret; -} -#endif - /** * ixgbe_set_rss_queues: Allocate queues for RSS * @adapter: board private structure to initialize @@ -4331,19 +4358,26 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) **/ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) { - bool ret = false; struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - f->indices = min((int)num_online_cpus(), f->indices); - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { #ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - e_info(probe, "FCoE enabled with DCB\n"); - ixgbe_set_dcb_queues(adapter); - } + int tc; + struct net_device *dev = adapter->netdev; + + tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); + f->indices = dev->tc_to_txq[tc].count; + f->mask = dev->tc_to_txq[tc].offset; #endif + } else { + f->indices = min((int)num_online_cpus(), f->indices); + + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { e_info(probe, "FCoE enabled with RSS\n"); if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || @@ -4356,14 +4390,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) f->mask = adapter->num_rx_queues; adapter->num_rx_queues += f->indices; adapter->num_tx_queues += f->indices; + } - ret = true; + return true; +} +#endif /* IXGBE_FCOE */ + +#ifdef CONFIG_IXGBE_DCB +static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; + int i, q; + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return ret; + + f->indices = 0; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS); + f->indices += q; } + f->mask = 0x7 << 3; + adapter->num_rx_queues = f->indices; + adapter->num_tx_queues = f->indices; + ret = true; + +#ifdef IXGBE_FCOE + /* FCoE enabled queues require special configuration done through + * configure_fcoe() and others. Here we map FCoE indices onto the + * DCB queue pairs allowing FCoE to own configuration later. 
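With DCB enabled, ixgbe_set_fcoe_queues() above no longer hard-codes its ring count; it maps the FCoE user priority to a traffic class with netdev_get_prio_tc_map() and then takes that class's (count, offset) pair from tc_to_txq. A sketch of the lookup with a hypothetical 8-TC, 4-queues-per-TC layout:

#include <stdint.h>
#include <stdio.h>

struct tc_txq {                 /* models net_device->tc_to_txq[] */
    uint16_t count;
    uint16_t offset;
};

/* Given the FCoE user priority, a prio->TC map and the per-TC queue
 * ranges, return the Tx queue range FCoE should use; this is what the
 * DCB branch of ixgbe_set_fcoe_queues() computes. */
static struct tc_txq fcoe_queue_range(unsigned fcoe_up,
                                      const uint8_t prio_tc[8],
                                      const struct tc_txq tc_txq[8])
{
    return tc_txq[prio_tc[fcoe_up & 0x7]];
}

int main(void)
{
    uint8_t prio_tc[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };    /* 1:1 default */
    struct tc_txq map[8];

    for (int tc = 0; tc < 8; tc++)
        map[tc] = (struct tc_txq){ .count = 4, .offset = (uint16_t)(tc * 4) };

    struct tc_txq fcoe = fcoe_queue_range(3, prio_tc, map);
    printf("FCoE: %u queues starting at %u\n", fcoe.count, fcoe.offset);
    return 0;
}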
+ */ + ixgbe_set_fcoe_queues(adapter); +#endif + return ret; } +#endif -#endif /* IXGBE_FCOE */ /** * ixgbe_set_sriov_queues: Allocate queues for IOV use * @adapter: board private structure to initialize @@ -4399,16 +4464,16 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) if (ixgbe_set_sriov_queues(adapter)) goto done; -#ifdef IXGBE_FCOE - if (ixgbe_set_fcoe_queues(adapter)) - goto done; - -#endif /* IXGBE_FCOE */ #ifdef CONFIG_IXGBE_DCB if (ixgbe_set_dcb_queues(adapter)) goto done; #endif +#ifdef IXGBE_FCOE + if (ixgbe_set_fcoe_queues(adapter)) + goto done; + +#endif /* IXGBE_FCOE */ if (ixgbe_set_fdir_queues(adapter)) goto done; @@ -4500,6 +4565,110 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) } #ifdef CONFIG_IXGBE_DCB + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + *tx = tc << 3; + *rx = tc << 2; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (num_tcs == 8) { + if (tc < 3) { + *tx = tc << 5; + *rx = tc << 4; + } else if (tc < 5) { + *tx = ((tc + 2) << 4); + *rx = tc << 4; + } else if (tc < num_tcs) { + *tx = ((tc + 8) << 3); + *rx = tc << 4; + } + } else if (num_tcs == 4) { + *rx = tc << 5; + switch (tc) { + case 0: + *tx = 0; + break; + case 1: + *tx = 64; + break; + case 2: + *tx = 96; + break; + case 3: + *tx = 112; + break; + default: + break; + } + } + break; + default: + break; + } +} + +#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS) + +/* ixgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. + * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int ixgbe_setup_tc(struct net_device *dev, u8 tc) +{ + int i; + unsigned int q, offset = 0; + + if (!tc) { + netdev_reset_tc(dev); + } else { + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* Hardware supports up to 8 traffic classes */ + if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc)) + return -EINVAL; + + /* Partition Tx queues evenly amongst traffic classes */ + for (i = 0; i < tc; i++) { + q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC); + netdev_set_prio_tc_map(dev, i, i); + netdev_set_tc_queue(dev, i, q, offset); + offset += q; + } + + /* This enables multiple traffic class support in the hardware + * which defaults to strict priority transmission by default. + * If traffic classes are already enabled perhaps through DCB + * code path then existing configuration will be used. 
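ixgbe_get_first_reg_idx() encodes where each traffic class's descriptor queues start. In the 8-TC 82599/X540 layout the low TCs get wider Tx ranges (TC0..2 get 32 queues each, TC3..4 get 16, TC5..7 get 8) while Rx classes are a uniform 16 queues apart. A standalone copy of just that 8-TC case makes the layout easy to print and verify; for instance TC4's first Tx queue is (4 + 2) << 4 = 96:

#include <stdio.h>

/* 82599/X540, 8 traffic classes: first Tx/Rx descriptor queue of each
 * TC, per the mapping in ixgbe_get_first_reg_idx(). */
static void first_reg_idx_8tc(unsigned tc, unsigned *tx, unsigned *rx)
{
    *rx = tc << 4;                     /* Rx TCs are 16 queues apart */
    if (tc < 3)
        *tx = tc << 5;                 /* TC0..2: 32 Tx queues each */
    else if (tc < 5)
        *tx = (tc + 2) << 4;           /* TC3..4: 16 Tx queues each */
    else
        *tx = (tc + 8) << 3;           /* TC5..7: 8 Tx queues each */
}

int main(void)
{
    for (unsigned tc = 0; tc < 8; tc++) {
        unsigned tx, rx;

        first_reg_idx_8tc(tc, &tx, &rx);
        printf("TC%u: tx %3u  rx %3u\n", tc, tx, rx);
    }
    return 0;
}

Printing the table reproduces the queue offsets listed in the comment that the patch removes from ixgbe_cache_ring_dcb() (0, 32, 64, 80, 96, 104, 112, 120 for Tx).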
+ */ + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) { + struct ieee_ets ets = { + .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7}, + }; + u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + + dev->dcbnl_ops->setdcbx(dev, mode); + dev->dcbnl_ops->ieee_setets(dev, &ets); + } + } + return 0; +} + /** * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB * @adapter: board private structure to initialize @@ -4509,72 +4678,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) **/ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { - int i; - bool ret = false; - int dcb_i = adapter->ring_feature[RING_F_DCB].indices; + struct net_device *dev = adapter->netdev; + int i, j, k; + u8 num_tcs = netdev_get_num_tc(dev); if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) return false; - /* the number of queues is assumed to be symmetric */ - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - for (i = 0; i < dcb_i; i++) { - adapter->rx_ring[i]->reg_idx = i << 3; - adapter->tx_ring[i]->reg_idx = i << 2; - } - ret = true; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (dcb_i == 8) { - /* - * Tx TC0 starts at: descriptor queue 0 - * Tx TC1 starts at: descriptor queue 32 - * Tx TC2 starts at: descriptor queue 64 - * Tx TC3 starts at: descriptor queue 80 - * Tx TC4 starts at: descriptor queue 96 - * Tx TC5 starts at: descriptor queue 104 - * Tx TC6 starts at: descriptor queue 112 - * Tx TC7 starts at: descriptor queue 120 - * - * Rx TC0-TC7 are offset by 16 queues each - */ - for (i = 0; i < 3; i++) { - adapter->tx_ring[i]->reg_idx = i << 5; - adapter->rx_ring[i]->reg_idx = i << 4; - } - for ( ; i < 5; i++) { - adapter->tx_ring[i]->reg_idx = ((i + 2) << 4); - adapter->rx_ring[i]->reg_idx = i << 4; - } - for ( ; i < dcb_i; i++) { - adapter->tx_ring[i]->reg_idx = ((i + 8) << 3); - adapter->rx_ring[i]->reg_idx = i << 4; - } - ret = true; - } else if (dcb_i == 4) { - /* - * Tx TC0 starts at: descriptor queue 0 - * Tx TC1 starts at: descriptor queue 64 - * Tx TC2 starts at: descriptor queue 96 - * Tx TC3 starts at: descriptor queue 112 - * - * Rx TC0-TC3 are offset by 32 queues each - */ - adapter->tx_ring[0]->reg_idx = 0; - adapter->tx_ring[1]->reg_idx = 64; - adapter->tx_ring[2]->reg_idx = 96; - adapter->tx_ring[3]->reg_idx = 112; - for (i = 0 ; i < dcb_i; i++) - adapter->rx_ring[i]->reg_idx = i << 5; - ret = true; + for (i = 0, k = 0; i < num_tcs; i++) { + unsigned int tx_s, rx_s; + u16 count = dev->tc_to_txq[i].count; + + ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); + for (j = 0; j < count; j++, k++) { + adapter->tx_ring[k]->reg_idx = tx_s + j; + adapter->rx_ring[k]->reg_idx = rx_s + j; + adapter->tx_ring[k]->dcb_tc = i; + adapter->rx_ring[k]->dcb_tc = i; } - break; - default: - break; } - return ret; + + return true; } #endif @@ -4620,33 +4744,6 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return false; -#ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - - ixgbe_cache_ring_dcb(adapter); - /* find out queues in TC for FCoE */ - fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; - fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; - /* - * In 82599, the number of Tx queues for each traffic - * class for both 8-TC and 4-TC modes are: - * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 - * 8 TCs: 32 32 16 16 8 8 8 8 - * 4 TCs: 64 64 32 32 - * We have 
max 8 queues for FCoE, where 8 the is - * FCoE redirection table size. If TC for FCoE is - * less than or equal to TC3, we have enough queues - * to add max of 8 queues for FCoE, so we start FCoE - * Tx queue from the next one, i.e., reg_idx + 1. - * If TC for FCoE is above TC3, implying 8 TC mode, - * and we need 8 for FCoE, we have to take all queues - * in that traffic class for FCoE. - */ - if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) - fcoe_tx_i--; - } -#endif /* CONFIG_IXGBE_DCB */ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) @@ -4703,16 +4800,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) if (ixgbe_cache_ring_sriov(adapter)) return; +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_cache_ring_dcb(adapter)) + return; +#endif + #ifdef IXGBE_FCOE if (ixgbe_cache_ring_fcoe(adapter)) return; - #endif /* IXGBE_FCOE */ -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_cache_ring_dcb(adapter)) - return; -#endif if (ixgbe_cache_ring_fdir(adapter)) return; @@ -5174,10 +5271,10 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; adapter->dcb_cfg.rx_pba_cfg = pba_equal; adapter->dcb_cfg.pfc_mode_enable = false; - adapter->dcb_cfg.round_robin_enable = false; adapter->dcb_set_bitmap = 0x00; + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, - adapter->ring_feature[RING_F_DCB].indices); + MAX_TRAFFIC_CLASS); #endif @@ -5442,8 +5539,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) - return -EINVAL; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED && + hw->mac.type != ixgbe_mac_X540) { + if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) + return -EINVAL; + } else { + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + } e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); /* must set new MTU before calling down or up */ @@ -5611,6 +5714,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) } ixgbe_clear_interrupt_scheme(adapter); +#ifdef CONFIG_DCB + kfree(adapter->ixgbe_ieee_pfc); + kfree(adapter->ixgbe_ieee_ets); +#endif #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -6101,12 +6208,16 @@ static void ixgbe_watchdog_task(struct work_struct *work) (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? "10 Gbps" : (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? - "1 Gbps" : "unknown speed")), + "1 Gbps" : + (link_speed == IXGBE_LINK_SPEED_100_FULL ? + "100 Mbps" : + "unknown speed"))), ((flow_rx && flow_tx) ? "RX/TX" : (flow_rx ? "RX" : (flow_tx ? 
"TX" : "None")))); netif_carrier_on(netdev); + ixgbe_check_vf_rate_limit(adapter); } else { /* Force detection of hung controller */ for (i = 0; i < adapter->num_tx_queues; i++) { @@ -6636,18 +6747,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) protocol = vlan_get_protocol(skb); - if ((protocol == htons(ETH_P_FCOE)) || - (protocol == htons(ETH_P_FIP))) { - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); - txq += adapter->ring_feature[RING_F_FCOE].mask; - return txq; -#ifdef CONFIG_IXGBE_DCB - } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - txq = adapter->fcoe.up; - return txq; -#endif - } + if (((protocol == htons(ETH_P_FCOE)) || + (protocol == htons(ETH_P_FIP))) && + (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { + txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); + txq += adapter->ring_feature[RING_F_FCOE].mask; + return txq; } #endif @@ -6657,15 +6762,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) return txq; } - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - if (skb->priority == TC_PRIO_CONTROL) - txq = adapter->ring_feature[RING_F_DCB].indices-1; - else - txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) - >> 13; - return txq; - } - return skb_tx_hash(dev, skb); } @@ -6687,13 +6783,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, tx_flags |= vlan_tx_tag_get(skb); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; - tx_flags |= ((skb->queue_mapping & 0x7) << 13); + tx_flags |= tx_ring->dcb_tc << 13; } tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && skb->priority != TC_PRIO_CONTROL) { - tx_flags |= ((skb->queue_mapping & 0x7) << 13); + tx_flags |= tx_ring->dcb_tc << 13; tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } @@ -6702,20 +6798,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, /* for FCoE with DCB, we force the priority to what * was specified by the switch */ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && - (protocol == htons(ETH_P_FCOE) || - protocol == htons(ETH_P_FIP))) { -#ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK - << IXGBE_TX_FLAGS_VLAN_SHIFT); - tx_flags |= ((adapter->fcoe.up << 13) - << IXGBE_TX_FLAGS_VLAN_SHIFT); - } -#endif - /* flag for FCoE offloads */ - if (protocol == htons(ETH_P_FCOE)) - tx_flags |= IXGBE_TX_FLAGS_FCOE; - } + (protocol == htons(ETH_P_FCOE))) + tx_flags |= IXGBE_TX_FLAGS_FCOE; #endif /* four things can cause us to need a context descriptor */ @@ -6988,11 +7072,15 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, +#ifdef CONFIG_IXGBE_DCB + .ndo_setup_tc = ixgbe_setup_tc, +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif #ifdef IXGBE_FCOE .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, + .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, .ndo_fcoe_enable = ixgbe_fcoe_enable, .ndo_fcoe_disable = ixgbe_fcoe_disable, @@ -7128,8 +7216,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, else indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); +#if defined(CONFIG_DCB) indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); -#ifdef IXGBE_FCOE 
+#elif defined(IXGBE_FCOE) indices += min_t(unsigned int, num_possible_cpus(), IXGBE_MAX_FCOE_INDICES); #endif @@ -7285,8 +7374,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED); - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; #ifdef CONFIG_IXGBE_DCB netdev->dcbnl_ops = &dcbnl_ops; @@ -7706,16 +7793,6 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, #endif /* CONFIG_IXGBE_DCA */ -/** - * ixgbe_get_hw_dev return device - * used by hardware layer to print debugging information - **/ -struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw) -{ - struct ixgbe_adapter *adapter = hw->back; - return adapter->netdev; -} - module_exit(ixgbe_exit_module); /* ixgbe_main.c */ diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c index ea82c5a1cd3e..1ff0eefcfd0a 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ixgbe/ixgbe_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -154,9 +154,6 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) udelay(mbx->usec_delay); } - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; out: return countdown ? 0 : IXGBE_ERR_MBX; } @@ -183,9 +180,6 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) udelay(mbx->usec_delay); } - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; out: return countdown ? 0 : IXGBE_ERR_MBX; } @@ -437,6 +431,7 @@ out_no_read: return ret_val; } +#ifdef CONFIG_PCI_IOV /** * ixgbe_init_mbx_params_pf - set initial values for pf mailbox * @hw: pointer to the HW structure @@ -447,24 +442,22 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) { struct ixgbe_mbx_info *mbx = &hw->mbx; - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - mbx->timeout = 0; - mbx->usec_delay = 0; + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540) + return; - mbx->size = IXGBE_VFMAILBOX_SIZE; + mbx->timeout = 0; + mbx->usec_delay = 0; - mbx->stats.msgs_tx = 0; - mbx->stats.msgs_rx = 0; - mbx->stats.reqs = 0; - mbx->stats.acks = 0; - mbx->stats.rsts = 0; - break; - default: - break; - } + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + mbx->size = IXGBE_VFMAILBOX_SIZE; } +#endif /* CONFIG_PCI_IOV */ struct ixgbe_mbx_operations mbx_ops_generic = { .read = ixgbe_read_mbx_pf, diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h index 3df9b1590218..fe6ea81dc7f8 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ixgbe/ixgbe_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
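The mailbox hunks above drop the code that zeroed mbx->timeout when a poll expired, so one missed message no longer poisons every later posted message. The underlying polling shape is unchanged: count down, test, delay. A sketch of that shape with a hypothetical check callback and a stand-in for udelay():

#include <stdbool.h>
#include <stdio.h>

static void delay_us(unsigned us) { (void)us; /* busy-wait omitted */ }

/* Generic countdown poll, the pattern ixgbe_poll_for_msg()/_ack() use:
 * call a check function until it succeeds or the countdown expires. */
static int poll_countdown(bool (*check)(void *), void *arg,
                          unsigned timeout, unsigned usec_delay)
{
    unsigned countdown = timeout;

    while (countdown && !check(arg)) {
        countdown--;
        delay_us(usec_delay);
    }
    return countdown ? 0 : -1;    /* -1: timed out, i.e. IXGBE_ERR_MBX */
}

static bool always_ready(void *arg) { (void)arg; return true; }

int main(void)
{
    printf("%d\n", poll_countdown(always_ready, NULL, 10, 10));
    return 0;
}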
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -86,7 +86,9 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +#ifdef CONFIG_PCI_IOV void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); +#endif /* CONFIG_PCI_IOV */ extern struct ixgbe_mbx_operations mbx_ops_generic; diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 8f7123e8fc0a..f72f705f6183 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -57,6 +57,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_PHY_ADDR_INVALID; u32 phy_addr; + u16 ext_ability = 0; if (hw->phy.type == ixgbe_phy_unknown) { for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { @@ -65,12 +66,29 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) ixgbe_get_phy_id(hw); hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); + + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.ops.read_reg(hw, + MDIO_PMA_EXTABLE, + MDIO_MMD_PMAPMD, + &ext_ability); + if (ext_ability & + (MDIO_PMA_EXTABLE_10GBT | + MDIO_PMA_EXTABLE_1000BT)) + hw->phy.type = + ixgbe_phy_cu_unknown; + else + hw->phy.type = + ixgbe_phy_generic; + } + status = 0; break; } } /* clear value if nothing found */ - hw->phy.mdio.prtad = 0; + if (status != 0) + hw->phy.mdio.prtad = 0; } else { status = 0; } @@ -138,17 +156,51 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) **/ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) { + u32 i; + u16 ctrl = 0; + s32 status = 0; + + if (hw->phy.type == ixgbe_phy_unknown) + status = ixgbe_identify_phy_generic(hw); + + if (status != 0 || hw->phy.type == ixgbe_phy_none) + goto out; + /* Don't reset PHY if it's shut down due to overtemp. */ if (!hw->phy.reset_if_overtemp && (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) - return 0; + goto out; /* * Perform soft PHY reset to the PHY_XS. * This will cause a soft reset to the PHY */ - return hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, - MDIO_CTRL1_RESET); + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, + MDIO_CTRL1_RESET); + + /* + * Poll for reset bit to self-clear indicating reset is complete. + * Some PHYs could take up to 3 seconds to complete and need about + * 1.7 usec delay after the reset is complete. 
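When the id-based lookup in ixgbe_identify_phy_generic() does not recognise the PHY, the code above now reads the PMA/PMD extended-ability register and classifies any device advertising 10GBASE-T or 1000BASE-T as an unknown copper PHY, falling back to a generic PHY otherwise. A sketch of that decision; the two ability bits below are illustrative, the real MDIO_PMA_EXTABLE_* values live in the kernel's mdio.h:

#include <stdint.h>
#include <stdio.h>

#define PMA_EXTABLE_10GBT  (1u << 2)   /* illustrative */
#define PMA_EXTABLE_1000BT (1u << 7)   /* illustrative */

enum phy_type { PHY_GENERIC, PHY_CU_UNKNOWN };

/* Fallback classification used when the PHY id is not recognised. */
static enum phy_type classify_phy(uint16_t ext_ability)
{
    if (ext_ability & (PMA_EXTABLE_10GBT | PMA_EXTABLE_1000BT))
        return PHY_CU_UNKNOWN;
    return PHY_GENERIC;
}

int main(void)
{
    printf("%d\n", classify_phy(PMA_EXTABLE_10GBT));    /* PHY_CU_UNKNOWN */
    return 0;
}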
+ */ + for (i = 0; i < 30; i++) { + msleep(100); + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_PHYXS, &ctrl); + if (!(ctrl & MDIO_CTRL1_RESET)) { + udelay(2); + break; + } + } + + if (ctrl & MDIO_CTRL1_RESET) { + status = IXGBE_ERR_RESET_FAILED; + hw_dbg(hw, "PHY reset polling failed to complete.\n"); + } + +out: + return status; } /** @@ -171,7 +223,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, else gssr = IXGBE_GSSR_PHY0_SM; - if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) status = IXGBE_ERR_SWFW_SYNC; if (status == 0) { @@ -243,7 +295,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, } } - ixgbe_release_swfw_sync(hw, gssr); + hw->mac.ops.release_swfw_sync(hw, gssr); } return status; @@ -269,7 +321,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, else gssr = IXGBE_GSSR_PHY0_SM; - if (ixgbe_acquire_swfw_sync(hw, gssr) != 0) + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) status = IXGBE_ERR_SWFW_SYNC; if (status == 0) { @@ -336,7 +388,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, } } - ixgbe_release_swfw_sync(hw, gssr); + hw->mac.ops.release_swfw_sync(hw, gssr); } return status; @@ -350,49 +402,89 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, **/ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { - s32 status = IXGBE_NOT_IMPLEMENTED; + s32 status = 0; u32 time_out; u32 max_time_out = 10; - u16 autoneg_reg; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; - /* - * Set advertisement settings in PHY based on autoneg_advertised - * settings. If autoneg_advertised = 0, then advertise default values - * tnx devices cannot be "forced" to a autoneg 10G and fail. But can - * for a 1G. 
- */ - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); - if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; - else - autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); + autoneg_reg &= ~ADVERTISE_100FULL; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); autoneg_reg |= MDIO_AN_CTRL1_RESTART; - hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); /* Wait for autonegotiation to finish */ for (time_out = 0; time_out < max_time_out; time_out++) { udelay(10); /* Restart PHY autonegotiation and wait for completion */ - status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_reg); + status = hw->phy.ops.read_reg(hw, MDIO_STAT1, + MDIO_MMD_AN, + &autoneg_reg); autoneg_reg &= MDIO_AN_STAT1_COMPLETE; if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { - status = 0; break; } } - if (time_out == max_time_out) + if (time_out == max_time_out) { status = IXGBE_ERR_LINK_SETUP; + hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); + } return status; } @@ -421,6 +513,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + /* Setup link based on the new speed settings */ hw->phy.ops.setup_link(hw); @@ -461,6 +556,180 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, } /** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. 
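ixgbe_setup_phy_link_generic() now programs each of the 10G, 1G and 100M advertisement registers with the same read-modify-write step: read the register, clear the advertisement bit, set it only when the corresponding speed is present in phy.autoneg_advertised, write the register back. A sketch of that step, with a plain array standing in for the MDIO registers and illustrative bit positions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flat array standing in for the PHY's AN registers; the real code
 * goes through hw->phy.ops.read_reg()/write_reg() over MDIO. */
static uint16_t an_regs[4];

/* Read-modify-write one advertisement bit, repeated for the 10G, 1G
 * and 100M registers in ixgbe_setup_phy_link_generic(). */
static void set_advertisement(unsigned reg, uint16_t adv_bit, bool enable)
{
    uint16_t val = an_regs[reg];    /* read */

    val &= ~adv_bit;                /* clear */
    if (enable)
        val |= adv_bit;             /* set only if the speed is advertised */
    an_regs[reg] = val;             /* write back */
}

int main(void)
{
    set_advertisement(0, 1u << 12, true);     /* e.g. enable the 10G bit */
    set_advertisement(1, 1u << 8, false);     /* e.g. withdraw the 1G bit */
    printf("%#x %#x\n", an_regs[0], an_regs[1]);
    return 0;
}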
+ **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + status = hw->phy.ops.read_reg(hw, + MDIO_STAT1, + MDIO_MMD_VEND1, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~ADVERTISE_100FULL; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = hw->phy.ops.read_reg(hw, MDIO_STAT1, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= MDIO_AN_STAT1_COMPLETE; + if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) + break; + } + + if (time_out == max_time_out) { + status = IXGBE_ERR_LINK_SETUP; + hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); + } + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY 
Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = 0; + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + MDIO_MMD_VEND1, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = 0; + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, + MDIO_MMD_VEND1, + firmware_version); + + return status; +} + +/** * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure **/ @@ -556,11 +825,10 @@ out: } /** - * ixgbe_identify_sfp_module_generic - Identifies SFP module and assigns - * the PHY type. + * ixgbe_identify_sfp_module_generic - Identifies SFP modules * @hw: pointer to hardware structure * - * Searches for and indentifies the SFP module. Assings appropriate PHY type. + * Searches for and identifies the SFP module and assigns appropriate PHY type. **/ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) { @@ -581,41 +849,62 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) goto out; } - status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_IDENTIFIER, &identifier); - if (status == IXGBE_ERR_SFP_NOT_PRESENT || status == IXGBE_ERR_I2C) { - status = IXGBE_ERR_SFP_NOT_PRESENT; - hw->phy.sfp_type = ixgbe_sfp_type_not_present; - if (hw->phy.type != ixgbe_phy_nl) { - hw->phy.id = 0; - hw->phy.type = ixgbe_phy_unknown; - } - goto out; - } + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; - if (identifier == IXGBE_SFF_IDENTIFIER_SFP) { - hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES, - &comp_codes_1g); - hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_10GBE_COMP_CODES, - &comp_codes_10g); - hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, - &cable_tech); - - /* ID Module - * ========= - * 0 SFP_DA_CU - * 1 SFP_SR - * 2 SFP_LR - * 3 SFP_DA_CORE0 - 82599-specific - * 4 SFP_DA_CORE1 - 82599-specific - * 5 SFP_SR/LR_CORE0 - 82599-specific - * 6 SFP_SR/LR_CORE1 - 82599-specific - * 7 SFP_act_lmt_DA_CORE0 - 82599-specific - * 8 SFP_act_lmt_DA_CORE1 - 82599-specific - * 9 SFP_1g_cu_CORE0 - 82599-specific - * 10 SFP_1g_cu_CORE1 - 82599-specific - */ + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + /* ID Module + * 
========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + */ if (hw->mac.type == ixgbe_mac_82598EB) { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.sfp_type = ixgbe_sfp_type_da_cu; @@ -647,31 +936,27 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) ixgbe_sfp_type_da_act_lmt_core1; } else { hw->phy.sfp_type = - ixgbe_sfp_type_unknown; + ixgbe_sfp_type_unknown; } - } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0; else hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1; - else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_srlr_core1; - else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { if (hw->bus.lan_id == 0) hw->phy.sfp_type = ixgbe_sfp_type_1g_cu_core0; else hw->phy.sfp_type = ixgbe_sfp_type_1g_cu_core1; - else + } else { hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } } if (hw->phy.sfp_type != stored_sfp_type) @@ -688,16 +973,33 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) /* Determine PHY vendor */ if (hw->phy.type != ixgbe_phy_nl) { hw->phy.id = identifier; - hw->phy.ops.read_i2c_eeprom(hw, + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE0, &oui_bytes[0]); - hw->phy.ops.read_i2c_eeprom(hw, + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE1, &oui_bytes[1]); - hw->phy.ops.read_i2c_eeprom(hw, + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_VENDOR_OUI_BYTE2, &oui_bytes[2]); + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + vendor_oui = ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | @@ -707,7 +1009,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) case IXGBE_SFF_VENDOR_OUI_TYCO: if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.type = - ixgbe_phy_sfp_passive_tyco; + ixgbe_phy_sfp_passive_tyco; break; case IXGBE_SFF_VENDOR_OUI_FTL: if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) @@ -724,7 +1026,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) default: if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) hw->phy.type = - ixgbe_phy_sfp_passive_unknown; + ixgbe_phy_sfp_passive_unknown; else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) hw->phy.type = ixgbe_phy_sfp_active_unknown; @@ -734,7 +1036,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) } } - /* All passive DA cables are supported */ + /* Allow any DA cable vendor */ if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | IXGBE_SFF_DA_ACTIVE_CABLE)) { status = 0; @@ -756,7 +1058,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) goto out; } - /* This 
is guaranteed to be 82599, no need to check for NULL */ hw->mac.ops.get_device_caps(hw, &enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || @@ -776,15 +1077,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) out: return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + } + return IXGBE_ERR_SFP_NOT_PRESENT; } /** - * ixgbe_get_sfp_init_sequence_offsets - Checks the MAC's EEPROM to see - * if it supports a given SFP+ module type, if so it returns the offsets to the - * phy init sequence block. + * ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence * @hw: pointer to hardware structure * @list_offset: offset to the SFP ID list * @data_offset: offset to the SFP data block + * + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + * so it returns the offsets to the phy init sequence block. **/ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, @@ -899,11 +1209,22 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { s32 status = 0; - u32 max_retry = 1; + u32 max_retry = 10; u32 retry = 0; + u16 swfw_mask = 0; bool nack = 1; + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + do { + if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) { + status = IXGBE_ERR_SWFW_SYNC; + goto read_byte_out; + } + ixgbe_i2c_start(hw); /* Device Address and write indication */ @@ -946,6 +1267,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, break; fail: + ixgbe_release_swfw_sync(hw, swfw_mask); + msleep(100); ixgbe_i2c_bus_clear(hw); retry++; if (retry < max_retry) @@ -955,6 +1278,9 @@ fail: } while (retry < max_retry); + ixgbe_release_swfw_sync(hw, swfw_mask); + +read_byte_out: return status; } @@ -973,6 +1299,17 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, s32 status = 0; u32 max_retry = 1; u32 retry = 0; + u16 swfw_mask = 0; + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) { + status = IXGBE_ERR_SWFW_SYNC; + goto write_byte_out; + } do { ixgbe_i2c_start(hw); @@ -1013,6 +1350,9 @@ fail: hw_dbg(hw, "I2C byte write error.\n"); } while (retry < max_retry); + ixgbe_release_swfw_sync(hw, swfw_mask); + +write_byte_out: return status; } @@ -1331,6 +1671,8 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); u32 i; + ixgbe_i2c_start(hw); + ixgbe_set_i2c_data(hw, &i2cctl, 1); for (i = 0; i < 9; i++) { @@ -1345,91 +1687,13 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) udelay(IXGBE_I2C_T_LOW); } + ixgbe_i2c_start(hw); + /* Put the i2c bus back to default state */ ixgbe_i2c_stop(hw); } /** - * ixgbe_check_phy_link_tnx - Determine link and speed status - * @hw: pointer to hardware structure - * - * Reads the VS1 register to determine if link is up and the current speed for - * the PHY. 
- **/ -s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) -{ - s32 status = 0; - u32 time_out; - u32 max_time_out = 10; - u16 phy_link = 0; - u16 phy_speed = 0; - u16 phy_data = 0; - - /* Initialize speed and link to default case */ - *link_up = false; - *speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* - * Check current speed and link status of the PHY register. - * This is a vendor specific register and may have to - * be changed for other copper PHYs. - */ - for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, - MDIO_MMD_VEND1, - &phy_data); - phy_link = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; - phy_speed = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; - if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { - *link_up = true; - if (phy_speed == - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - } - } - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status = 0; - - status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version -**/ -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status = 0; - - status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** * ixgbe_tn_check_overtemp - Checks if an overtemp occured. * @hw: pointer to hardware structure * diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index e2c6b7eac641..197bdd13106a 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -58,6 +58,10 @@ #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 +/* Flow control defines */ +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 + /* Bit-shift macros */ #define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 #define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 @@ -104,6 +108,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, u16 *firmware_version); s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 187b3a16ec1f..6e50d8328942 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -110,6 +110,33 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); } +void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf) +{ + struct ixgbe_hw *hw = &adapter->hw; + int new_mtu = msgbuf[1]; + u32 max_frs; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Only X540 supports jumbo frames in IOV mode */ + if (adapter->hw.mac.type != ixgbe_mac_X540) + return; + + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) { + e_err(drv, "VF mtu %d out of range\n", new_mtu); + return; + } + + max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) & + IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT; + if (max_frs < new_mtu) { + max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs); + } + + e_info(hw, "VF requests change max MTU to %d\n", new_mtu); +} + static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) { u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); @@ -302,7 +329,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) hash_list, vf); break; case IXGBE_VF_SET_LPE: - WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE); + ixgbe_set_vf_lpe(adapter, msgbuf); break; case IXGBE_VF_SET_VLAN: add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) @@ -451,9 +478,90 @@ out: return err; } +static int ixgbe_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + return 100; + case IXGBE_LINK_SPEED_1GB_FULL: + return 1000; + case IXGBE_LINK_SPEED_10GB_FULL: + return 10000; + default: + return 0; + } +} + +static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; + + bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; + bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) & 
+ IXGBE_RTTBCNRC_RF_INT_MASK); + bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); + } else { + bcnrc_val = 0; + } + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */ + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); +} + +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) +{ + int actual_link_speed, i; + bool reset_rate = false; + + /* VF Tx rate limit was not set */ + if (adapter->vf_rate_link_speed == 0) + return; + + actual_link_speed = ixgbe_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit rate " + "is disabled\n"); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (reset_rate) + adapter->vfinfo[i].tx_rate = 0; + + ixgbe_set_vf_rate_limit(&adapter->hw, i, + adapter->vfinfo[i].tx_rate, + actual_link_speed); + } +} + int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { - return -EOPNOTSUPP; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int actual_link_speed; + + actual_link_speed = ixgbe_link_mbps(adapter->link_speed); + if ((vf >= adapter->num_vfs) || (!adapter->link_up) || + (tx_rate > actual_link_speed) || (actual_link_speed != 10000) || + ((tx_rate != 0) && (tx_rate <= 10))) + /* rate limit cannot be set to 10Mb or less in 10Gb adapters */ + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vfinfo[vf].tx_rate = (u16)tx_rate; + ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + + return 0; } int ixgbe_ndo_get_vf_config(struct net_device *netdev, @@ -464,7 +572,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); - ivi->tx_rate = 0; + ivi->tx_rate = adapter->vfinfo[vf].tx_rate; ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; return 0; diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h index 49dc14debef7..34175564bb78 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ixgbe/ixgbe_sriov.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -40,6 +40,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); #endif /* _IXGBE_SRIOV_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index fd3358f54139..25c1fb7eda06 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -91,7 +91,7 @@ /* General Receive Control */ #define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ -#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ #define IXGBE_VPDDIAG0 0x10204 #define IXGBE_VPDDIAG1 0x10208 @@ -342,7 +342,7 @@ /* Wake Up Control */ #define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ #define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ -#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ /* Wake Up Filter Control */ #define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ @@ -533,6 +533,12 @@ #define IXGBE_RTTDTECC 0x04990 #define IXGBE_RTTDTECC_NO_BCN 0x00000100 #define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) + /* FCoE registers */ #define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ @@ -659,6 +665,8 @@ #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ #define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ #define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ #define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ #define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ #define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ @@ -669,6 +677,11 @@ #define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ #define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ #define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_PCRC8ECL 0x0E810 +#define IXGBE_PCRC8ECH 0x0E811 +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 +#define IXGBE_LDPCECH 0x0E821 /* Management */ #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ @@ -1002,6 +1015,13 @@ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +/* MII clause 22/28 definitions */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_AUTONEG_REG 0x0 + #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 #define IXGBE_MAX_PHY_ADDR 32 @@ -1614,6 +1634,8 @@ #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. 
WWN base exists */ /* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 #define IXGBE_PCI_LINK_STATUS 0xB2 #define IXGBE_PCI_DEVICE_CONTROL2 0xC8 #define IXGBE_PCI_LINK_WIDTH 0x3F0 @@ -1680,6 +1702,8 @@ #define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 #define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ #define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ @@ -2240,6 +2264,7 @@ enum ixgbe_mac_type { enum ixgbe_phy_type { ixgbe_phy_unknown = 0, + ixgbe_phy_none, ixgbe_phy_tn, ixgbe_phy_aq, ixgbe_phy_cu_unknown, @@ -2328,32 +2353,31 @@ enum ixgbe_bus_type { /* PCI bus speeds */ enum ixgbe_bus_speed { ixgbe_bus_speed_unknown = 0, - ixgbe_bus_speed_33, - ixgbe_bus_speed_66, - ixgbe_bus_speed_100, - ixgbe_bus_speed_120, - ixgbe_bus_speed_133, - ixgbe_bus_speed_2500, - ixgbe_bus_speed_5000, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, ixgbe_bus_speed_reserved }; /* PCI bus widths */ enum ixgbe_bus_width { ixgbe_bus_width_unknown = 0, - ixgbe_bus_width_pcie_x1, - ixgbe_bus_width_pcie_x2, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, ixgbe_bus_width_pcie_x4 = 4, ixgbe_bus_width_pcie_x8 = 8, - ixgbe_bus_width_32, - ixgbe_bus_width_64, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, ixgbe_bus_width_reserved }; struct ixgbe_addr_filter_info { u32 num_mc_addrs; u32 rar_used_count; - u32 mc_addr_in_rar_count; u32 mta_in_use; u32 overflow_promisc; bool uc_set_promisc; @@ -2491,6 +2515,8 @@ struct ixgbe_mac_operations { s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); s32 (*setup_sfp)(struct ixgbe_hw *); s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); + void (*release_swfw_sync)(struct ixgbe_hw *, u16); /* Link */ void (*disable_tx_laser)(struct ixgbe_hw *); @@ -2513,7 +2539,6 @@ struct ixgbe_mac_operations { s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); @@ -2554,6 +2579,7 @@ struct ixgbe_eeprom_info { u16 address_bits; }; +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 struct ixgbe_mac_info { struct ixgbe_mac_operations ops; enum ixgbe_mac_type type; @@ -2564,6 +2590,8 @@ struct ixgbe_mac_info { u16 wwnn_prefix; /* prefix for World Wide Port Name (WWPN) */ u16 wwpn_prefix; +#define IXGBE_MAX_MTA 128 + u32 mta_shadow[IXGBE_MAX_MTA]; s32 mc_filter_type; u32 mcft_size; u32 vft_size; @@ -2576,6 +2604,7 @@ struct ixgbe_mac_info { u32 orig_autoc2; bool orig_link_settings_stored; bool autotry_restart; + u8 flags; }; struct ixgbe_phy_info { @@ -2682,7 +2711,9 @@ struct ixgbe_info { #define IXGBE_ERR_EEPROM_VERSION -24 #define IXGBE_ERR_NO_SPACE -25 #define IXGBE_ERR_OVERTEMP -26 -#define IXGBE_ERR_RAR_INDEX -27 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_FLOW_CONTROL -29 #define 
IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 #define IXGBE_ERR_PBA_SECTION -31 #define IXGBE_ERR_INVALID_ARGUMENT -32 diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c index f2518b01067d..f47e93fe32be 100644 --- a/drivers/net/ixgbe/ixgbe_x540.c +++ b/drivers/net/ixgbe/ixgbe_x540.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2010 Intel Corporation. + Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -31,7 +31,6 @@ #include "ixgbe.h" #include "ixgbe_phy.h" -//#include "ixgbe_mbx.h" #define IXGBE_X540_MAX_TX_QUEUES 128 #define IXGBE_X540_MAX_RX_QUEUES 128 @@ -110,12 +109,9 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) * Prevent the PCI-E bus from from hanging by disabling PCI-E master * access and verify no pending requests before reset */ - status = ixgbe_disable_pcie_master(hw); - if (status != 0) { - status = IXGBE_ERR_MASTER_REQUESTS_PENDING; - hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); - } + ixgbe_disable_pcie_master(hw); +mac_reset_top: /* * Issue global reset to the MAC. Needs to be SW reset if link is up. * If link reset is used when link is up, it might reset the PHY when @@ -148,6 +144,19 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) hw_dbg(hw, "Reset polling failed to complete.\n"); } + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. We use 1usec since that is + * what is needed for ixgbe_disable_pcie_master(). The second reset + * then clears out any effects of those events. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + udelay(1); + goto mac_reset_top; + } + /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; @@ -191,7 +200,7 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) * clear the multicast table. Also reset num_rar_entries to 128, * since we modify this value when programming the SAN MAC address. */ - hw->mac.num_rar_entries = 128; + hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; hw->mac.ops.init_rx_addrs(hw); /* Store the permanent mac address */ @@ -242,8 +251,11 @@ static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) } /** - * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params - * @hw: pointer to hardware structure + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
**/ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { @@ -262,7 +274,7 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", - eeprom->type, eeprom->word_size); + eeprom->type, eeprom->word_size); } return 0; @@ -278,7 +290,7 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) { s32 status; - if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) status = ixgbe_read_eerd_generic(hw, offset, data); else status = IXGBE_ERR_SWFW_SYNC; @@ -311,7 +323,7 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) (data << IXGBE_EEPROM_RW_REG_DATA) | IXGBE_EEPROM_RW_REG_START; - if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) { + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); if (status != 0) { hw_dbg(hw, "Eeprom write EEWR timed out\n"); @@ -676,7 +688,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .set_vmdq = &ixgbe_set_vmdq_generic, .clear_vmdq = &ixgbe_clear_vmdq_generic, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, - .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, .enable_mc = &ixgbe_enable_mc_generic, .disable_mc = &ixgbe_disable_mc_generic, @@ -687,6 +698,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .setup_sfp = NULL, .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, + .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, + .release_swfw_sync = &ixgbe_release_swfw_sync_X540, }; static struct ixgbe_eeprom_operations eeprom_ops_X540 = { @@ -702,7 +715,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_sfp_module_generic, .init = NULL, - .reset = &ixgbe_reset_phy_generic, + .reset = NULL, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, .setup_link = &ixgbe_setup_phy_link_generic, diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h index de643eb2ada6..78abb6f1a866 100644 --- a/drivers/net/ixgbevf/defines.h +++ b/drivers/net/ixgbevf/defines.h @@ -65,6 +65,8 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ #define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ #define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 /* DCA Control */ #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c index fa29b3c8c464..0563ab29264e 100644 --- a/drivers/net/ixgbevf/ethtool.c +++ b/drivers/net/ixgbevf/ethtool.c @@ -172,7 +172,7 @@ static char *ixgbevf_reg_names[] = { "IXGBE_VFSTATUS", "IXGBE_VFLINKS", "IXGBE_VFRXMEMWRAP", - "IXGBE_VFRTIMER", + "IXGBE_VFFRTIMER", "IXGBE_VTEICR", "IXGBE_VTEICS", "IXGBE_VTEIMS", @@ -240,7 +240,7 @@ static void ixgbevf_get_regs(struct net_device *netdev, regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); - regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER); + regs_buff[4] = IXGBE_READ_REG(hw, 
IXGBE_VFFRTIMER); /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h index a63efcb2cf1b..b703f60be3b7 100644 --- a/drivers/net/ixgbevf/ixgbevf.h +++ b/drivers/net/ixgbevf/ixgbevf.h @@ -207,7 +207,6 @@ struct ixgbevf_adapter { u64 hw_tso_ctxt; u64 hw_tso6_ctxt; u32 tx_timeout_count; - bool detect_tx_hung; /* RX */ struct ixgbevf_ring *rx_ring; /* One per active queue */ diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index 464e6c9d3fc2..054ab05b7c6a 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -49,9 +49,9 @@ char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = - "Intel(R) 82599 Virtual Function"; + "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; -#define DRV_VERSION "1.0.19-k0" +#define DRV_VERSION "2.0.0-k2" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2010 Intel Corporation."; @@ -107,7 +107,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, } /* - * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes * @queue: queue to map the corresponding interrupt to @@ -162,42 +162,6 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, /* tx_buffer_info must be completely set up in the transmit path */ } -static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, - unsigned int eop) -{ - struct ixgbe_hw *hw = &adapter->hw; - u32 head, tail; - - /* Detect a transmit hang in hardware, this serializes the - * check with the clearing of time_stamp and movement of eop */ - head = readl(hw->hw_addr + tx_ring->head); - tail = readl(hw->hw_addr + tx_ring->tail); - adapter->detect_tx_hung = false; - if ((head != tail) && - tx_ring->tx_buffer_info[eop].time_stamp && - time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) { - /* detected Tx unit hang */ - union ixgbe_adv_tx_desc *tx_desc; - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); - printk(KERN_ERR "Detected Tx Unit Hang\n" - " Tx Queue <%d>\n" - " TDH, TDT <%x>, <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "tx_buffer_info[next_to_clean]\n" - " time_stamp <%lx>\n" - " jiffies <%lx>\n", - tx_ring->queue_index, - head, tail, - tx_ring->next_to_use, eop, - tx_ring->tx_buffer_info[eop].time_stamp, jiffies); - return true; - } - - return false; -} - #define IXGBE_MAX_TXD_PWR 14 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) @@ -293,16 +257,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, #endif } - if (adapter->detect_tx_hung) { - if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) { - /* schedule immediate reset if we believe we hung */ - printk(KERN_INFO - "tx hang %d detected, resetting adapter\n", - adapter->tx_timeout_count + 1); - ixgbevf_tx_timeout(adapter->netdev); - } - } - /* re-arm the interrupt */ if ((count >= tx_ring->work_limit) && (!test_bit(__IXGBEVF_DOWN, &adapter->state))) { @@ -334,7 +288,6 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, struct ixgbevf_adapter *adapter = q_vector->adapter; bool is_vlan = (status & IXGBE_RXD_STAT_VP); u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - int 
ret; if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) { if (adapter->vlgrp && is_vlan) @@ -345,9 +298,9 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, napi_gro_receive(&q_vector->napi, skb); } else { if (adapter->vlgrp && is_vlan) - ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag); + vlan_hwaccel_rx(skb, adapter->vlgrp, tag); else - ret = netif_rx(skb); + netif_rx(skb); } } @@ -1017,7 +970,7 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data) } /** - * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues) + * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues) * @irq: unused * @data: pointer to our q_vector struct for this interrupt vector **/ @@ -1665,6 +1618,11 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) j = adapter->rx_ring[i].reg_idx; rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); rxdctl |= IXGBE_RXDCTL_ENABLE; + if (hw->mac.type == ixgbe_mac_X540_vf) { + rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; + rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | + IXGBE_RXDCTL_RLPML_EN); + } IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); ixgbevf_rx_desc_queue_enable(adapter, i); } @@ -1967,7 +1925,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, } /* - * ixgbe_set_num_queues: Allocate queues for device, feature dependant + * ixgbevf_set_num_queues: Allocate queues for device, feature dependant * @adapter: board private structure to initialize * * This is the top level queue allocation routine. The order here is very @@ -2216,7 +2174,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; - pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + hw->revision_id = pdev->revision; hw->subsystem_vendor_id = pdev->subsystem_vendor; hw->subsystem_device_id = pdev->subsystem_device; @@ -2410,9 +2368,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work) 10 : 1); netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); - } else { - /* Force detection of hung controller */ - adapter->detect_tx_hung = true; } } else { adapter->link_up = false; @@ -2427,9 +2382,6 @@ static void ixgbevf_watchdog_task(struct work_struct *work) ixgbevf_update_stats(adapter); pf_has_reset: - /* Force detection of hung controller every watchdog period */ - adapter->detect_tx_hung = true; - /* Reset the timer */ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, @@ -3217,10 +3169,16 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; + u32 msg[2]; + + if (adapter->hw.mac.type == ixgbe_mac_X540_vf) + max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; /* MTU < 68 is an error and causes problems on some kernels */ - if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE)) + if ((new_mtu < 68) || (max_frame > max_possible_frame)) return -EINVAL; hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", @@ -3228,6 +3186,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; + msg[0] = IXGBE_VF_SET_LPE; + msg[1] = max_frame; + hw->mbx.ops.write_posted(hw, msg, 2); + if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); @@ 
-3272,8 +3234,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { static void ixgbevf_assign_netdev_ops(struct net_device *dev) { - struct ixgbevf_adapter *adapter; - adapter = netdev_priv(dev); dev->netdev_ops = &ixgbe_netdev_ops; ixgbevf_set_ethtool_ops(dev); dev->watchdog_timeo = 5 * HZ; @@ -3519,9 +3479,9 @@ static struct pci_driver ixgbevf_driver = { }; /** - * ixgbe_init_module - Driver Registration Routine + * ixgbevf_init_module - Driver Registration Routine * - * ixgbe_init_module is the first routine called when the driver is + * ixgbevf_init_module is the first routine called when the driver is * loaded. All it does is register with the PCI subsystem. **/ static int __init ixgbevf_init_module(void) @@ -3539,9 +3499,9 @@ static int __init ixgbevf_init_module(void) module_init(ixgbevf_init_module); /** - * ixgbe_exit_module - Driver Exit Cleanup Routine + * ixgbevf_exit_module - Driver Exit Cleanup Routine * - * ixgbe_exit_module is called just before the driver is removed + * ixgbevf_exit_module is called just before the driver is removed * from memory. **/ static void __exit ixgbevf_exit_module(void) @@ -3551,7 +3511,7 @@ static void __exit ixgbevf_exit_module(void) #ifdef DEBUG /** - * ixgbe_get_hw_dev_name - return device name string + * ixgbevf_get_hw_dev_name - return device name string * used by hardware layer to print debugging information **/ char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h index fb80ca1bcc93..189200eeca26 100644 --- a/drivers/net/ixgbevf/regs.h +++ b/drivers/net/ixgbevf/regs.h @@ -31,7 +31,7 @@ #define IXGBE_VFCTRL 0x00000 #define IXGBE_VFSTATUS 0x00008 #define IXGBE_VFLINKS 0x00010 -#define IXGBE_VFRTIMER 0x00048 +#define IXGBE_VFFRTIMER 0x00048 #define IXGBE_VFRXMEMWRAP 0x03190 #define IXGBE_VTEICR 0x00100 #define IXGBE_VTEICS 0x00104 diff --git a/drivers/net/jme.c b/drivers/net/jme.c index e97ebef3cf47..f690474f4409 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -161,6 +161,67 @@ jme_setup_wakeup_frame(struct jme_adapter *jme, } static inline void +jme_mac_rxclk_off(struct jme_adapter *jme) +{ + jme->reg_gpreg1 |= GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_mac_rxclk_on(struct jme_adapter *jme) +{ + jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF; + jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_mac_txclk_off(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC); + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_mac_txclk_on(struct jme_adapter *jme) +{ + u32 speed = jme->reg_ghc & GHC_SPEED; + if (speed == GHC_SPEED_1000M) + jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; + else + jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_reset_ghc_speed(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX); + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_reset_250A2_workaround(struct jme_adapter *jme) +{ + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); +} + +static inline void +jme_assert_ghc_reset(struct jme_adapter *jme) +{ + jme->reg_ghc |= GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void +jme_clear_ghc_reset(struct jme_adapter *jme) +{ + jme->reg_ghc &= ~GHC_SWRST; + jwrite32f(jme, JME_GHC, jme->reg_ghc); +} + +static inline void 
jme_reset_mac_processor(struct jme_adapter *jme) { static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; @@ -168,9 +229,24 @@ jme_reset_mac_processor(struct jme_adapter *jme) u32 gpreg0; int i; - jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST); - udelay(2); - jwrite32(jme, JME_GHC, jme->reg_ghc); + jme_reset_ghc_speed(jme); + jme_reset_250A2_workaround(jme); + + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_assert_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); + udelay(1); + jme_clear_ghc_reset(jme); + udelay(1); + jme_mac_rxclk_on(jme); + jme_mac_txclk_on(jme); + udelay(1); + jme_mac_rxclk_off(jme); + jme_mac_txclk_off(jme); jwrite32(jme, JME_RXDBA_LO, 0x00000000); jwrite32(jme, JME_RXDBA_HI, 0x00000000); @@ -190,14 +266,6 @@ jme_reset_mac_processor(struct jme_adapter *jme) else gpreg0 = GPREG0_DEFAULT; jwrite32(jme, JME_GPREG0, gpreg0); - jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT); -} - -static inline void -jme_reset_ghc_speed(struct jme_adapter *jme) -{ - jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX); - jwrite32(jme, JME_GHC, jme->reg_ghc); } static inline void @@ -336,13 +404,13 @@ jme_linkstat_from_phy(struct jme_adapter *jme) } static inline void -jme_set_phyfifoa(struct jme_adapter *jme) +jme_set_phyfifo_5level(struct jme_adapter *jme) { jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004); } static inline void -jme_set_phyfifob(struct jme_adapter *jme) +jme_set_phyfifo_8level(struct jme_adapter *jme) { jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000); } @@ -351,7 +419,7 @@ static int jme_check_link(struct net_device *netdev, int testonly) { struct jme_adapter *jme = netdev_priv(netdev); - u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1; + u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr; char linkmsg[64]; int rc = 0; @@ -414,23 +482,21 @@ jme_check_link(struct net_device *netdev, int testonly) jme->phylink = phylink; - ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX | - GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE | - GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY); + /* + * The speed/duplex setting of jme->reg_ghc already cleared + * by jme_reset_mac_processor() + */ switch (phylink & PHY_LINK_SPEED_MASK) { case PHY_LINK_SPEED_10M: - ghc |= GHC_SPEED_10M | - GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + jme->reg_ghc |= GHC_SPEED_10M; strcat(linkmsg, "10 Mbps, "); break; case PHY_LINK_SPEED_100M: - ghc |= GHC_SPEED_100M | - GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE; + jme->reg_ghc |= GHC_SPEED_100M; strcat(linkmsg, "100 Mbps, "); break; case PHY_LINK_SPEED_1000M: - ghc |= GHC_SPEED_1000M | - GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY; + jme->reg_ghc |= GHC_SPEED_1000M; strcat(linkmsg, "1000 Mbps, "); break; default: @@ -439,42 +505,40 @@ jme_check_link(struct net_device *netdev, int testonly) if (phylink & PHY_LINK_DUPLEX) { jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT); - ghc |= GHC_DPX; + jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX); + jme->reg_ghc |= GHC_DPX; } else { jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT | TXMCS_BACKOFF | TXMCS_CARRIERSENSE | TXMCS_COLLISION); - jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN | - ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) | - TXTRHD_TXREN | - ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL)); + jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX); } - gpreg1 = GPREG1_DEFAULT; + jwrite32(jme, JME_GHC, jme->reg_ghc); + if (is_buggy250(jme->pdev->device, jme->chiprev)) { + jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH | + GPREG1_RSSPATCH); if (!(phylink & PHY_LINK_DUPLEX)) - gpreg1 |= GPREG1_HALFMODEPATCH; + jme->reg_gpreg1 |= 
GPREG1_HALFMODEPATCH; switch (phylink & PHY_LINK_SPEED_MASK) { case PHY_LINK_SPEED_10M: - jme_set_phyfifoa(jme); - gpreg1 |= GPREG1_RSSPATCH; + jme_set_phyfifo_8level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; break; case PHY_LINK_SPEED_100M: - jme_set_phyfifob(jme); - gpreg1 |= GPREG1_RSSPATCH; + jme_set_phyfifo_5level(jme); + jme->reg_gpreg1 |= GPREG1_RSSPATCH; break; case PHY_LINK_SPEED_1000M: - jme_set_phyfifoa(jme); + jme_set_phyfifo_8level(jme); break; default: break; } } - - jwrite32(jme, JME_GPREG1, gpreg1); - jwrite32(jme, JME_GHC, ghc); - jme->reg_ghc = ghc; + jwrite32(jme, JME_GPREG1, jme->reg_gpreg1); strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ? "Full-Duplex, " : @@ -613,10 +677,14 @@ jme_enable_tx_engine(struct jme_adapter *jme) * Enable TX Engine */ wmb(); - jwrite32(jme, JME_TXCS, jme->reg_txcs | + jwrite32f(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0 | TXCS_ENABLE); + /* + * Start clock for TX MAC Processor + */ + jme_mac_txclk_on(jme); } static inline void @@ -651,6 +719,11 @@ jme_disable_tx_engine(struct jme_adapter *jme) if (!i) pr_err("Disable TX engine timeout\n"); + + /* + * Stop clock for TX MAC Processor + */ + jme_mac_txclk_off(jme); } static void @@ -825,16 +898,22 @@ jme_enable_rx_engine(struct jme_adapter *jme) /* * Setup Unicast Filter */ + jme_set_unicastaddr(jme->dev); jme_set_multi(jme->dev); /* * Enable RX Engine */ wmb(); - jwrite32(jme, JME_RXCS, jme->reg_rxcs | + jwrite32f(jme, JME_RXCS, jme->reg_rxcs | RXCS_QUEUESEL_Q0 | RXCS_ENABLE | RXCS_QST); + + /* + * Start clock for RX MAC Processor + */ + jme_mac_rxclk_on(jme); } static inline void @@ -871,10 +950,40 @@ jme_disable_rx_engine(struct jme_adapter *jme) if (!i) pr_err("Disable RX engine timeout\n"); + /* + * Stop clock for RX MAC Processor + */ + jme_mac_rxclk_off(jme); +} + +static u16 +jme_udpsum(struct sk_buff *skb) +{ + u16 csum = 0xFFFFu; + + if (skb->len < (ETH_HLEN + sizeof(struct iphdr))) + return csum; + if (skb->protocol != htons(ETH_P_IP)) + return csum; + skb_set_network_header(skb, ETH_HLEN); + if ((ip_hdr(skb)->protocol != IPPROTO_UDP) || + (skb->len < (ETH_HLEN + + (ip_hdr(skb)->ihl << 2) + + sizeof(struct udphdr)))) { + skb_reset_network_header(skb); + return csum; + } + skb_set_transport_header(skb, + ETH_HLEN + (ip_hdr(skb)->ihl << 2)); + csum = udp_hdr(skb)->check; + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + + return csum; } static int -jme_rxsum_ok(struct jme_adapter *jme, u16 flags) +jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb) { if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4))) return false; @@ -887,7 +996,7 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags) } if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS)) - == RXWBFLAG_UDPON)) { + == RXWBFLAG_UDPON) && jme_udpsum(skb)) { if (flags & RXWBFLAG_IPV4) netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n"); return false; @@ -935,7 +1044,7 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) skb_put(skb, framesize); skb->protocol = eth_type_trans(skb, jme->dev); - if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags))) + if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb)) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); @@ -1207,7 +1316,6 @@ jme_link_change_tasklet(unsigned long arg) tasklet_disable(&jme->rxempty_task); if (netif_carrier_ok(netdev)) { - jme_reset_ghc_speed(jme); jme_disable_rx_engine(jme); jme_disable_tx_engine(jme); jme_reset_mac_processor(jme); @@ -1577,6 +1685,38 @@ 
jme_free_irq(struct jme_adapter *jme) } static inline void +jme_new_phy_on(struct jme_adapter *jme) +{ + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL); + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_ENBG; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); +} + +static inline void +jme_new_phy_off(struct jme_adapter *jme) +{ + u32 reg; + + reg = jread32(jme, JME_PHY_PWR); + reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW | + PHY_PWR_DWN2 | PHY_PWR_CLKSEL; + jwrite32(jme, JME_PHY_PWR, reg); + + pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, ®); + reg &= ~PE1_GPREG0_PBG; + reg |= PE1_GPREG0_PDD3COLD; + pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg); +} + +static inline void jme_phy_on(struct jme_adapter *jme) { u32 bmcr; @@ -1584,6 +1724,22 @@ jme_phy_on(struct jme_adapter *jme) bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); bmcr &= ~BMCR_PDOWN; jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_on(jme); +} + +static inline void +jme_phy_off(struct jme_adapter *jme) +{ + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr |= BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); + + if (new_phy_power_ctrl(jme->chip_main_rev)) + jme_new_phy_off(jme); } static int @@ -1606,12 +1762,11 @@ jme_open(struct net_device *netdev) jme_start_irq(jme); - if (test_bit(JME_FLAG_SSET, &jme->flags)) { - jme_phy_on(jme); + jme_phy_on(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) jme_set_settings(netdev, &jme->old_ecmd); - } else { + else jme_reset_phy_processor(jme); - } jme_reset_link(jme); @@ -1657,12 +1812,6 @@ jme_wait_link(struct jme_adapter *jme) } } -static inline void -jme_phy_off(struct jme_adapter *jme) -{ - jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN); -} - static void jme_powersave_phy(struct jme_adapter *jme) { @@ -1696,7 +1845,6 @@ jme_close(struct net_device *netdev) tasklet_disable(&jme->rxclean_task); tasklet_disable(&jme->rxempty_task); - jme_reset_ghc_speed(jme); jme_disable_rx_engine(jme); jme_disable_tx_engine(jme); jme_reset_mac_processor(jme); @@ -1993,27 +2141,34 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } +static void +jme_set_unicastaddr(struct net_device *netdev) +{ + struct jme_adapter *jme = netdev_priv(netdev); + u32 val; + + val = (netdev->dev_addr[3] & 0xff) << 24 | + (netdev->dev_addr[2] & 0xff) << 16 | + (netdev->dev_addr[1] & 0xff) << 8 | + (netdev->dev_addr[0] & 0xff); + jwrite32(jme, JME_RXUMA_LO, val); + val = (netdev->dev_addr[5] & 0xff) << 8 | + (netdev->dev_addr[4] & 0xff); + jwrite32(jme, JME_RXUMA_HI, val); +} + static int jme_set_macaddr(struct net_device *netdev, void *p) { struct jme_adapter *jme = netdev_priv(netdev); struct sockaddr *addr = p; - u32 val; if (netif_running(netdev)) return -EBUSY; spin_lock_bh(&jme->macaddr_lock); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - - val = (addr->sa_data[3] & 0xff) << 24 | - (addr->sa_data[2] & 0xff) << 16 | - (addr->sa_data[1] & 0xff) << 8 | - (addr->sa_data[0] & 0xff); - jwrite32(jme, JME_RXUMA_LO, val); - val = (addr->sa_data[5] & 0xff) << 8 | - (addr->sa_data[4] & 0xff); - jwrite32(jme, JME_RXUMA_HI, val); + jme_set_unicastaddr(netdev); spin_unlock_bh(&jme->macaddr_lock); return 0; @@ -2731,6 +2886,8 @@ jme_check_hw_ver(struct 
jme_adapter *jme) jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT; jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT; + jme->chip_main_rev = jme->chiprev & 0xF; + jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF; } static const struct net_device_ops jme_netdev_ops = { @@ -2880,6 +3037,7 @@ jme_init_one(struct pci_dev *pdev, jme->reg_rxmcs = RXMCS_DEFAULT; jme->reg_txpfc = 0; jme->reg_pmcs = PMCS_MFEN; + jme->reg_gpreg1 = GPREG1_DEFAULT; set_bit(JME_FLAG_TXCSUM, &jme->flags); set_bit(JME_FLAG_TSO, &jme->flags); @@ -2936,8 +3094,8 @@ jme_init_one(struct pci_dev *pdev, jme->mii_if.mdio_write = jme_mdio_write; jme_clear_pm(jme); - jme_set_phyfifoa(jme); - pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev); + jme_set_phyfifo_5level(jme); + jme->pcirev = pdev->revision; if (!jme->fpgaver) jme_phy_init(jme); jme_phy_off(jme); @@ -2964,14 +3122,14 @@ jme_init_one(struct pci_dev *pdev, goto err_out_unmap; } - netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n", + netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n", (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ? "JMC250 Gigabit Ethernet" : (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ? "JMC260 Fast Ethernet" : "Unknown", (jme->fpgaver != 0) ? " (FPGA)" : "", (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev, - jme->rev, netdev->dev_addr); + jme->pcirev, netdev->dev_addr); return 0; @@ -3035,7 +3193,6 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state) jme_polling_mode(jme); jme_stop_pcc_timer(jme); - jme_reset_ghc_speed(jme); jme_disable_rx_engine(jme); jme_disable_tx_engine(jme); jme_reset_mac_processor(jme); @@ -3066,12 +3223,11 @@ jme_resume(struct pci_dev *pdev) jme_clear_pm(jme); pci_restore_state(pdev); - if (test_bit(JME_FLAG_SSET, &jme->flags)) { - jme_phy_on(jme); + jme_phy_on(jme); + if (test_bit(JME_FLAG_SSET, &jme->flags)) jme_set_settings(netdev, &jme->old_ecmd); - } else { + else jme_reset_phy_processor(jme); - } jme_start_irq(jme); netif_device_attach(netdev); diff --git a/drivers/net/jme.h b/drivers/net/jme.h index eac09264bf2a..8bf30451e821 100644 --- a/drivers/net/jme.h +++ b/drivers/net/jme.h @@ -26,7 +26,7 @@ #define __JME_H_INCLUDED__ #define DRV_NAME "jme" -#define DRV_VERSION "1.0.7" +#define DRV_VERSION "1.0.8" #define PFX DRV_NAME ": " #define PCI_DEVICE_ID_JMICRON_JMC250 0x0250 @@ -103,6 +103,37 @@ enum jme_spi_op_bits { #define HALF_US 500 /* 500 ns */ #define JMESPIIOCTL SIOCDEVPRIVATE +#define PCI_PRIV_PE1 0xE4 + +enum pci_priv_pe1_bit_masks { + PE1_ASPMSUPRT = 0x00000003, /* + * RW: + * Aspm_support[1:0] + * (R/W Port of 5C[11:10]) + */ + PE1_MULTIFUN = 0x00000004, /* RW: Multi_fun_bit */ + PE1_RDYDMA = 0x00000008, /* RO: ~link.rdy_for_dma */ + PE1_ASPMOPTL = 0x00000030, /* RW: link.rx10s_option[1:0] */ + PE1_ASPMOPTH = 0x000000C0, /* RW: 10_req=[3]?HW:[2] */ + PE1_GPREG0 = 0x0000FF00, /* + * SRW: + * Cfg_gp_reg0 + * [7:6] phy_giga BG control + * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#) + * [4:0] Reserved + */ + PE1_GPREG0_PBG = 0x0000C000, /* phy_giga BG control */ + PE1_GPREG1 = 0x00FF0000, /* RW: Cfg_gp_reg1 */ + PE1_REVID = 0xFF000000, /* RO: Rev ID */ +}; + +enum pci_priv_pe1_values { + PE1_GPREG0_ENBG = 0x00000000, /* en BG */ + PE1_GPREG0_PDD3COLD = 0x00004000, /* giga_PD + d3cold */ + PE1_GPREG0_PDPCIESD = 0x00008000, /* giga_PD + pcie_shutdown */ + PE1_GPREG0_PDPCIEIDDQ = 0x0000C000, /* giga_PD + pcie_iddq */ +}; + /* * Dynamic(adaptive)/Static PCC values */ @@ -403,6 +434,7 @@ struct jme_adapter { u32 reg_rxmcs; u32 reg_ghc; 
u32 reg_pmcs; + u32 reg_gpreg1; u32 phylink; u32 tx_ring_size; u32 tx_ring_mask; @@ -411,8 +443,10 @@ struct jme_adapter { u32 rx_ring_mask; u8 mrrs; unsigned int fpgaver; - unsigned int chiprev; - u8 rev; + u8 chiprev; + u8 chip_main_rev; + u8 chip_sub_rev; + u8 pcirev; u32 msg_enable; struct ethtool_cmd old_ecmd; unsigned int old_mtu; @@ -497,6 +531,7 @@ enum jme_iomap_regs { JME_PMCS = JME_MAC | 0x60, /* Power Management Control/Stat */ + JME_PHY_PWR = JME_PHY | 0x24, /* New PHY Power Ctrl Register */ JME_PHY_CS = JME_PHY | 0x28, /* PHY Ctrl and Status Register */ JME_PHY_LINK = JME_PHY | 0x30, /* PHY Link Status Register */ JME_SMBCSR = JME_PHY | 0x40, /* SMB Control and Status */ @@ -624,6 +659,14 @@ enum jme_txtrhd_shifts { TXTRHD_TXRL_SHIFT = 0, }; +enum jme_txtrhd_values { + TXTRHD_FULLDUPLEX = 0x00000000, + TXTRHD_HALFDUPLEX = TXTRHD_TXPEN | + ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) | + TXTRHD_TXREN | + ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL), +}; + /* * RX Control/Status Bits */ @@ -779,6 +822,8 @@ static inline u32 smi_phy_addr(int x) */ enum jme_ghc_bit_mask { GHC_SWRST = 0x40000000, + GHC_TO_CLK_SRC = 0x00C00000, + GHC_TXMAC_CLK_SRC = 0x00300000, GHC_DPX = 0x00000040, GHC_SPEED = 0x00000030, GHC_LINK_POLL = 0x00000001, @@ -833,6 +878,21 @@ enum jme_pmcs_bit_masks { }; /* + * New PHY Power Control Register + */ +enum jme_phy_pwr_bit_masks { + PHY_PWR_DWN1SEL = 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */ + PHY_PWR_DWN1SW = 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */ + PHY_PWR_DWN2 = 0x04000000, /* Phy_giga.p_PWR_DOWN2 */ + PHY_PWR_CLKSEL = 0x08000000, /* + * XTL_OUT Clock select + * (an internal free-running clock) + * 0: xtl_out = phy_giga.A_XTL25_O + * 1: xtl_out = phy_giga.PD_OSC + */ +}; + +/* * Giga PHY Status Registers */ enum jme_phy_link_bit_mask { @@ -942,18 +1002,17 @@ enum jme_gpreg0_vals { /* * General Purpose REG-1 - * Note: All theses bits defined here are for - * Chip mode revision 0x11 only */ -enum jme_gpreg1_masks { +enum jme_gpreg1_bit_masks { + GPREG1_RXCLKOFF = 0x04000000, + GPREG1_PCREQN = 0x00020000, + GPREG1_HALFMODEPATCH = 0x00000040, /* For Chip revision 0x11 only */ + GPREG1_RSSPATCH = 0x00000020, /* For Chip revision 0x11 only */ GPREG1_INTRDELAYUNIT = 0x00000018, GPREG1_INTRDELAYENABLE = 0x00000007, }; enum jme_gpreg1_vals { - GPREG1_RSSPATCH = 0x00000040, - GPREG1_HALFMODEPATCH = 0x00000020, - GPREG1_INTDLYUNIT_16NS = 0x00000000, GPREG1_INTDLYUNIT_256NS = 0x00000008, GPREG1_INTDLYUNIT_1US = 0x00000010, @@ -967,7 +1026,7 @@ enum jme_gpreg1_vals { GPREG1_INTDLYEN_6U = 0x00000006, GPREG1_INTDLYEN_7U = 0x00000007, - GPREG1_DEFAULT = 0x00000000, + GPREG1_DEFAULT = GPREG1_PCREQN, }; /* @@ -1184,16 +1243,22 @@ enum jme_phy_reg17_vals { /* * Workaround */ -static inline int is_buggy250(unsigned short device, unsigned int chiprev) +static inline int is_buggy250(unsigned short device, u8 chiprev) { return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11; } +static inline int new_phy_power_ctrl(u8 chip_main_rev) +{ + return chip_main_rev >= 5; +} + /* * Function prototypes */ static int jme_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd); +static void jme_set_unicastaddr(struct net_device *netdev); static void jme_set_multi(struct net_device *netdev); #endif diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 2d9663a1c54d..ea0dc451da9c 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -129,10 +129,6 @@ static u32 always_on(struct net_device *dev) static const struct ethtool_ops loopback_ethtool_ops 
= { .get_link = always_on, - .set_tso = ethtool_op_set_tso, - .get_tx_csum = always_on, - .get_sg = always_on, - .get_rx_csum = always_on, }; static int loopback_dev_init(struct net_device *dev) @@ -169,9 +165,12 @@ static void loopback_setup(struct net_device *dev) dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ dev->flags = IFF_LOOPBACK; dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST - | NETIF_F_TSO + | NETIF_F_ALL_TSO + | NETIF_F_UFO | NETIF_F_NO_CSUM + | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6ed577b065df..5b37d3c191e4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -152,9 +152,10 @@ static void macvlan_broadcast(struct sk_buff *skb, } /* called under rcu_read_lock() from netif_receive_skb */ -static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) +static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) { struct macvlan_port *port; + struct sk_buff *skb = *pskb; const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; const struct macvlan_dev *src; @@ -184,7 +185,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA); - return skb; + return RX_HANDLER_PASS; } if (port->passthru) @@ -192,12 +193,12 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) else vlan = macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) - return skb; + return RX_HANDLER_PASS; dev = vlan->dev; if (unlikely(!(dev->flags & IFF_UP))) { kfree_skb(skb); - return NULL; + return RX_HANDLER_CONSUMED; } len = skb->len + ETH_HLEN; skb = skb_share_check(skb, GFP_ATOMIC); @@ -211,7 +212,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) out: macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); - return NULL; + return RX_HANDLER_CONSUMED; } static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) @@ -219,9 +220,11 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; + __u8 ip_summed = skb->ip_summed; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; + skb->ip_summed = CHECKSUM_UNNECESSARY; /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { @@ -241,6 +244,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) } xmit_world: + skb->ip_summed = ip_summed; skb_set_dev(skb, vlan->lowerdev); return dev_queue_xmit(skb); } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index fc27a9926d9e..6696e56e6320 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -39,7 +39,7 @@ struct macvtap_queue { struct socket sock; struct socket_wq wq; int vnet_hdr_sz; - struct macvlan_dev *vlan; + struct macvlan_dev __rcu *vlan; struct file *file; unsigned int flags; }; @@ -141,7 +141,8 @@ static void macvtap_put_queue(struct macvtap_queue *q) struct macvlan_dev *vlan; spin_lock(&macvtap_lock); - vlan = rcu_dereference(q->vlan); + vlan = rcu_dereference_protected(q->vlan, + lockdep_is_held(&macvtap_lock)); if (vlan) { int index = get_slot(vlan, q); @@ -219,7 +220,8 @@ static void macvtap_del_queues(struct net_device *dev) /* macvtap_put_queue can free some slots, so go through all slots */ 
spin_lock(&macvtap_lock); for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) { - q = rcu_dereference(vlan->taps[i]); + q = rcu_dereference_protected(vlan->taps[i], + lockdep_is_held(&macvtap_lock)); if (q) { qlist[j++] = q; rcu_assign_pointer(vlan->taps[i], NULL); @@ -570,7 +572,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, } rcu_read_lock_bh(); - vlan = rcu_dereference(q->vlan); + vlan = rcu_dereference_bh(q->vlan); if (vlan) macvlan_start_xmit(skb, vlan->dev); else @@ -584,7 +586,7 @@ err_kfree: err: rcu_read_lock_bh(); - vlan = rcu_dereference(q->vlan); + vlan = rcu_dereference_bh(q->vlan); if (vlan) vlan->dev->stats.tx_dropped++; rcu_read_unlock_bh(); @@ -632,7 +634,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len); rcu_read_lock_bh(); - vlan = rcu_dereference(q->vlan); + vlan = rcu_dereference_bh(q->vlan); if (vlan) macvlan_count_rx(vlan, len, ret == 0, 0); rcu_read_unlock_bh(); @@ -728,7 +730,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, case TUNGETIFF: rcu_read_lock_bh(); - vlan = rcu_dereference(q->vlan); + vlan = rcu_dereference_bh(q->vlan); if (vlan) dev_hold(vlan->dev); rcu_read_unlock_bh(); @@ -737,7 +739,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, return -ENOLINK; ret = 0; - if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) || + if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) || put_user(q->flags, &ifr->ifr_flags)) ret = -EFAULT; dev_put(vlan->dev); diff --git a/drivers/net/mii.c b/drivers/net/mii.c index 210b2b164b30..0a6c6a2e7550 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -354,7 +354,7 @@ unsigned int mii_check_media (struct mii_if_info *mii, if (!new_carrier) { netif_carrier_off(mii->dev); if (ok_to_print) - printk(KERN_INFO "%s: link down\n", mii->dev->name); + netdev_info(mii->dev, "link down\n"); return 0; /* duplex did not change */ } @@ -381,12 +381,12 @@ unsigned int mii_check_media (struct mii_if_info *mii, duplex = 1; if (ok_to_print) - printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n", - mii->dev->name, - lpa2 & (LPA_1000FULL | LPA_1000HALF) ? "1000" : - media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10", - duplex ? "full" : "half", - lpa); + netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n", + lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : + media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? + 100 : 10, + duplex ? "full" : "half", + lpa); if ((init_media) || (mii->full_duplex != duplex)) { mii->full_duplex = duplex; diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 02076e16542a..34425b94452f 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -35,6 +35,8 @@ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
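Editor's note: the macvtap hunks annotate q->vlan and vlan->taps[] as __rcu and then pick the dereference primitive that matches each context: rcu_dereference_protected() with a lockdep expression on the update side (macvtap_lock held) and rcu_dereference_bh() inside rcu_read_lock_bh() sections. A generic sketch of that split with illustrative names; the freeing strategy (synchronize_rcu before kfree) is one option, not what macvtap itself does.

struct thing {
	int value;
};

struct holder {
	struct thing __rcu *ptr;	/* written under lock, read under RCU */
	spinlock_t lock;
};

static void holder_replace(struct holder *h, struct thing *new)
{
	struct thing *old;

	spin_lock(&h->lock);
	old = rcu_dereference_protected(h->ptr, lockdep_is_held(&h->lock));
	rcu_assign_pointer(h->ptr, new);
	spin_unlock(&h->lock);

	if (old) {
		synchronize_rcu();	/* wait out readers before freeing */
		kfree(old);
	}
}

static int holder_peek_bh(struct holder *h)
{
	struct thing *t;
	int value = -1;

	rcu_read_lock_bh();
	t = rcu_dereference_bh(h->ptr);	/* matches the _bh read section */
	if (t)
		value = t->value;
	rcu_read_unlock_bh();

	return value;
}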
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/init.h> #include <linux/dma-mapping.h> #include <linux/in.h> @@ -627,9 +629,8 @@ err: if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) != (RX_FIRST_DESC | RX_LAST_DESC)) { if (net_ratelimit()) - dev_printk(KERN_ERR, &mp->dev->dev, - "received packet spanning " - "multiple descriptors\n"); + netdev_err(mp->dev, + "received packet spanning multiple descriptors\n"); } if (cmd_sts & ERROR_SUMMARY) @@ -868,15 +869,14 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { txq->tx_dropped++; - dev_printk(KERN_DEBUG, &dev->dev, - "failed to linearize skb with tiny " - "unaligned fragment\n"); + netdev_printk(KERN_DEBUG, dev, + "failed to linearize skb with tiny unaligned fragment\n"); return NETDEV_TX_BUSY; } if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { if (net_ratelimit()) - dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n"); + netdev_err(dev, "tx queue full?!\n"); kfree_skb(skb); return NETDEV_TX_OK; } @@ -959,7 +959,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) skb = __skb_dequeue(&txq->tx_skb); if (cmd_sts & ERROR_SUMMARY) { - dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); + netdev_info(mp->dev, "tx error\n"); mp->dev->stats.tx_errors++; } @@ -1122,20 +1122,20 @@ static int smi_bus_read(struct mii_bus *bus, int addr, int reg) int ret; if (smi_wait_ready(msp)) { - printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); + pr_warn("SMI bus busy timeout\n"); return -ETIMEDOUT; } writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg); if (smi_wait_ready(msp)) { - printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); + pr_warn("SMI bus busy timeout\n"); return -ETIMEDOUT; } ret = readl(smi_reg); if (!(ret & SMI_READ_VALID)) { - printk(KERN_WARNING "mv643xx_eth: SMI bus read not valid\n"); + pr_warn("SMI bus read not valid\n"); return -ENODEV; } @@ -1148,7 +1148,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) void __iomem *smi_reg = msp->base + SMI_REG; if (smi_wait_ready(msp)) { - printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); + pr_warn("SMI bus busy timeout\n"); return -ETIMEDOUT; } @@ -1156,7 +1156,7 @@ static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val) (addr << 16) | (val & 0xffff), smi_reg); if (smi_wait_ready(msp)) { - printk(KERN_WARNING "mv643xx_eth: SMI bus busy timeout\n"); + pr_warn("SMI bus busy timeout\n"); return -ETIMEDOUT; } @@ -1566,9 +1566,8 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er) if (netif_running(dev)) { mv643xx_eth_stop(dev); if (mv643xx_eth_open(dev)) { - dev_printk(KERN_ERR, &dev->dev, - "fatal error on re-opening device after " - "ring param change\n"); + netdev_err(dev, + "fatal error on re-opening device after ring param change\n"); return -ENOMEM; } } @@ -1874,7 +1873,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index) } if (rxq->rx_desc_area == NULL) { - dev_printk(KERN_ERR, &mp->dev->dev, + netdev_err(mp->dev, "can't allocate rx ring (%d bytes)\n", size); goto out; } @@ -1884,8 +1883,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index) rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb), GFP_KERNEL); if (rxq->rx_skb == NULL) { - dev_printk(KERN_ERR, &mp->dev->dev, - "can't allocate rx skb ring\n"); + netdev_err(mp->dev, "can't allocate rx skb ring\n"); goto out_free; } @@ -1944,8 +1942,7 @@ static 
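Editor's note: the mv643xx_eth message cleanup leans on two idioms: defining pr_fmt() before any include so pr_warn()/pr_notice() pick up the module-name prefix automatically, and using netdev_err()/netdev_info() for anything tied to a specific interface. A minimal sketch of both; report_status() is a hypothetical caller.

/* Must be defined before the first include that drags in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void report_status(struct net_device *dev, bool bus_timeout)
{
	if (bus_timeout)
		pr_warn("SMI bus busy timeout\n");	/* prefixed with the module name */
	else
		netdev_info(dev, "link up\n");		/* prefixed with the device and ifname */
}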
void rxq_deinit(struct rx_queue *rxq) } if (rxq->rx_desc_count) { - dev_printk(KERN_ERR, &mp->dev->dev, - "error freeing rx ring -- %d skbs stuck\n", + netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", rxq->rx_desc_count); } @@ -1987,7 +1984,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index) } if (txq->tx_desc_area == NULL) { - dev_printk(KERN_ERR, &mp->dev->dev, + netdev_err(mp->dev, "can't allocate tx ring (%d bytes)\n", size); return -ENOMEM; } @@ -2093,7 +2090,7 @@ static void handle_link_event(struct mv643xx_eth_private *mp) if (netif_carrier_ok(dev)) { int i; - printk(KERN_INFO "%s: link down\n", dev->name); + netdev_info(dev, "link down\n"); netif_carrier_off(dev); @@ -2124,10 +2121,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp) duplex = (port_status & FULL_DUPLEX) ? 1 : 0; fc = (port_status & FLOW_CONTROL_ENABLED) ? 1 : 0; - printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, " - "flow control %sabled\n", dev->name, - speed, duplex ? "full" : "half", - fc ? "en" : "dis"); + netdev_info(dev, "link up, %d Mb/s, %s duplex, flow control %sabled\n", + speed, duplex ? "full" : "half", fc ? "en" : "dis"); if (!netif_carrier_ok(dev)) netif_carrier_on(dev); @@ -2337,7 +2332,7 @@ static int mv643xx_eth_open(struct net_device *dev) err = request_irq(dev->irq, mv643xx_eth_irq, IRQF_SHARED, dev->name, dev); if (err) { - dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); + netdev_err(dev, "can't assign irq\n"); return -EAGAIN; } @@ -2483,9 +2478,8 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) */ mv643xx_eth_stop(dev); if (mv643xx_eth_open(dev)) { - dev_printk(KERN_ERR, &dev->dev, - "fatal error on re-opening device after " - "MTU change\n"); + netdev_err(dev, + "fatal error on re-opening device after MTU change\n"); } return 0; @@ -2508,7 +2502,7 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev) { struct mv643xx_eth_private *mp = netdev_priv(dev); - dev_printk(KERN_INFO, &dev->dev, "tx timeout\n"); + netdev_info(dev, "tx timeout\n"); schedule_work(&mp->tx_timeout_task); } @@ -2603,8 +2597,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) int ret; if (!mv643xx_eth_version_printed++) - printk(KERN_NOTICE "MV-643xx 10/100/1000 ethernet " - "driver version %s\n", mv643xx_eth_driver_version); + pr_notice("MV-643xx 10/100/1000 ethernet driver version %s\n", + mv643xx_eth_driver_version); ret = -EINVAL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2871,14 +2865,12 @@ static int mv643xx_eth_probe(struct platform_device *pdev) pd = pdev->dev.platform_data; if (pd == NULL) { - dev_printk(KERN_ERR, &pdev->dev, - "no mv643xx_eth_platform_data\n"); + dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n"); return -ENODEV; } if (pd->shared == NULL) { - dev_printk(KERN_ERR, &pdev->dev, - "no mv643xx_eth_platform_data->shared\n"); + dev_err(&pdev->dev, "no mv643xx_eth_platform_data->shared\n"); return -ENODEV; } @@ -2957,11 +2949,11 @@ static int mv643xx_eth_probe(struct platform_device *pdev) if (err) goto out; - dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %pM\n", - mp->port_num, dev->dev_addr); + netdev_notice(dev, "port %d with MAC address %pM\n", + mp->port_num, dev->dev_addr); if (mp->tx_desc_sram_size > 0) - dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); + netdev_notice(dev, "configured with sram\n"); return 0; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index ea5cfe2c3a04..a7f2eed9a08a 100644 --- 
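Editor's note: there is a deliberate split in the probe path above: errors hit before a net_device exists are reported with dev_err() against the platform device, while everything after allocation uses the netdev_* helpers. A minimal sketch of that two-phase pattern, with registration and error handling abbreviated and all names assumed.

static int example_probe(struct platform_device *pdev)
{
	struct net_device *dev;

	if (!pdev->dev.platform_data) {
		/* No net_device yet: report against the bus device. */
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}

	dev = alloc_etherdev(0);
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* From here on, netdev_* names the interface in every message. */
	netdev_notice(dev, "port probed with MAC address %pM\n",
		      dev->dev_addr);
	return 0;
}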
a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -253,7 +253,7 @@ struct myri10ge_priv { unsigned long serial_number; int vendor_specific_offset; int fw_multicast_support; - unsigned long features; + u32 features; u32 max_tso6; u32 read_dma; u32 write_dma; @@ -1776,7 +1776,7 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) { struct myri10ge_priv *mgp = netdev_priv(netdev); - unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO); + u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO); if (tso_enabled) netdev->features |= flags; diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index a11380544e6c..d7299f1a4940 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -739,7 +739,8 @@ struct netxen_recv_context { #define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c #define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d #define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e -#define NX_CDRP_CMD_MAX 0x0000001f +#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f +#define NX_CDRP_CMD_MAX 0x00000020 #define NX_RCODE_SUCCESS 0 #define NX_RCODE_NO_HOST_MEM 1 @@ -1054,6 +1055,7 @@ typedef struct { #define NX_FW_CAPABILITY_BDG (1 << 8) #define NX_FW_CAPABILITY_FVLANTX (1 << 9) #define NX_FW_CAPABILITY_HW_LRO (1 << 10) +#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11) /* module types */ #define LINKEVENT_MODULE_NOT_PRESENT 1 @@ -1349,6 +1351,8 @@ void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup); void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *); void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64); +int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, + u32 speed, u32 duplex, u32 autoneg); int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable); diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index f7d06cbc70ae..f16966afa64e 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c @@ -112,6 +112,21 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) return 0; } +int +nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter, + u32 speed, u32 duplex, u32 autoneg) +{ + + return netxen_issue_cmd(adapter, + adapter->ahw.pci_func, + NXHAL_VERSION, + speed, + duplex, + autoneg, + NX_CDRP_CMD_CONFIG_GBE_PORT); + +} + static int nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter) { diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 587498e140bb..653d308e0f5d 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -214,7 +214,6 @@ skip: check_sfp_module = netif_running(dev) && adapter->has_link_events; } else { - ecmd->autoneg = AUTONEG_ENABLE; ecmd->supported |= (SUPPORTED_TP |SUPPORTED_Autoneg); ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); @@ -252,53 +251,24 @@ static int netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct netxen_adapter *adapter = netdev_priv(dev); - __u32 status; + int ret; - /* read which mode */ - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { - /* autonegotiation */ - if (adapter->phy_write && - adapter->phy_write(adapter, - NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, - 
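Editor's note: with the new NX_CDRP_CMD_CONFIG_GBE_PORT command, link parameters on netxen GbE ports are handed to the firmware instead of being poked into the PHY over MII. A condensed sketch of the resulting set_settings flow, restating the logic of the ethtool hunk that follows; the capability check guards against firmware that predates the command.

static int example_set_link(struct netxen_adapter *adapter,
			    struct ethtool_cmd *ecmd)
{
	int ret;

	if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
		return -EOPNOTSUPP;	/* firmware cannot reconfigure the port */

	ret = nx_fw_cmd_set_gbe_port(adapter, ecmd->speed, ecmd->duplex,
				     ecmd->autoneg);
	if (ret == NX_RCODE_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	return ret ? -EIO : 0;
}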
ecmd->autoneg) != 0) - return -EIO; - else - adapter->link_autoneg = ecmd->autoneg; + if (adapter->ahw.port_type != NETXEN_NIC_GBE) + return -EOPNOTSUPP; - if (adapter->phy_read && - adapter->phy_read(adapter, - NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, - &status) != 0) - return -EIO; + if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG)) + return -EOPNOTSUPP; - /* speed */ - switch (ecmd->speed) { - case SPEED_10: - netxen_set_phy_speed(status, 0); - break; - case SPEED_100: - netxen_set_phy_speed(status, 1); - break; - case SPEED_1000: - netxen_set_phy_speed(status, 2); - break; - } - /* set duplex mode */ - if (ecmd->duplex == DUPLEX_HALF) - netxen_clear_phy_duplex(status); - if (ecmd->duplex == DUPLEX_FULL) - netxen_set_phy_duplex(status); - if (adapter->phy_write && - adapter->phy_write(adapter, - NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, - *((int *)&status)) != 0) - return -EIO; - else { - adapter->link_speed = ecmd->speed; - adapter->link_duplex = ecmd->duplex; - } - } else + ret = nx_fw_cmd_set_gbe_port(adapter, ecmd->speed, ecmd->duplex, + ecmd->autoneg); + if (ret == NX_RCODE_NOT_SUPPORTED) return -EOPNOTSUPP; + else if (ret) + return -EIO; + + adapter->link_speed = ecmd->speed; + adapter->link_duplex = ecmd->duplex; + adapter->link_autoneg = ecmd->autoneg; if (!netif_running(dev)) return 0; diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 33fac32e0d9f..83348dc4b184 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -1032,6 +1032,9 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev) netif_carrier_off(netdev); netif_tx_disable(netdev); + if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION) + netxen_linkevent_request(adapter, 0); + if (adapter->stop_port) adapter->stop_port(adapter); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 35fda5ac8120..392a6c4b72e5 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -77,7 +77,6 @@ config NATIONAL_PHY Currently supports the DP83865 PHY. config STE10XP - depends on PHYLIB tristate "Driver for STMicroelectronics STe10Xp PHYs" ---help--- This is the driver for the STe100p and STe101p PHYs. diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 0fd1678bc5a9..590f902deb6b 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -19,13 +19,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/phy.h> - -#define PHY_ID_KSZ9021 0x00221611 -#define PHY_ID_KS8737 0x00221720 -#define PHY_ID_KS8041 0x00221510 -#define PHY_ID_KS8051 0x00221550 -/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */ -#define PHY_ID_KS8001 0x0022161A +#include <linux/micrel_phy.h> /* general Interrupt control/status reg in vendor specific block. 
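Editor's note: ks8051_config_init() only sets the 50 MHz RMII clock bit when the attaching MAC driver passed MICREL_PHY_50MHZ_CLK in phydev->dev_flags. A sketch of how a board whose KS8051 is fed a 50 MHz reference clock might do that through the flags argument of phy_connect() (signature as of this kernel generation); the handler and bus-id names are illustrative.

#include <linux/phy.h>
#include <linux/micrel_phy.h>

static void example_link_handler(struct net_device *ndev)
{
	/* react to link changes reported by the PHY state machine */
}

static int example_attach_phy(struct net_device *ndev, const char *bus_id)
{
	struct phy_device *phydev;

	phydev = phy_connect(ndev, bus_id, example_link_handler,
			     MICREL_PHY_50MHZ_CLK,
			     PHY_INTERFACE_MODE_RMII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	return 0;
}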
*/ #define MII_KSZPHY_INTCS 0x1B @@ -46,6 +40,7 @@ #define KSZPHY_CTRL_INT_ACTIVE_HIGH (1 << 9) #define KSZ9021_CTRL_INT_ACTIVE_HIGH (1 << 14) #define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) +#define KSZ8051_RMII_50MHZ_CLK (1 << 7) static int kszphy_ack_interrupt(struct phy_device *phydev) { @@ -106,6 +101,19 @@ static int kszphy_config_init(struct phy_device *phydev) return 0; } +static int ks8051_config_init(struct phy_device *phydev) +{ + int regval; + + if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) { + regval = phy_read(phydev, MII_KSZPHY_CTRL); + regval |= KSZ8051_RMII_50MHZ_CLK; + phy_write(phydev, MII_KSZPHY_CTRL, regval); + } + + return 0; +} + static struct phy_driver ks8737_driver = { .phy_id = PHY_ID_KS8737, .phy_id_mask = 0x00fffff0, @@ -142,7 +150,7 @@ static struct phy_driver ks8051_driver = { .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, - .config_init = kszphy_config_init, + .config_init = ks8051_config_init, .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a8445c72fc13..f7670330f988 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -319,7 +319,8 @@ int phy_mii_ioctl(struct phy_device *phydev, /* fall through */ case SIOCGMIIREG: - mii_data->val_out = phy_read(phydev, mii_data->reg_num); + mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id, + mii_data->reg_num); break; case SIOCSMIIREG: @@ -350,8 +351,9 @@ int phy_mii_ioctl(struct phy_device *phydev, } } - phy_write(phydev, mii_data->reg_num, val); - + mdiobus_write(phydev->bus, mii_data->phy_id, + mii_data->reg_num, val); + if (mii_data->reg_num == MII_BMCR && val & BMCR_RESET && phydev->drv->config_init) { diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index c7a6c4466978..9f6d670748d1 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -592,8 +592,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ppp_release(NULL, file); err = 0; } else - printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n", - atomic_long_read(&file->f_count)); + pr_warn("PPPIOCDETACH file->f_count=%ld\n", + atomic_long_read(&file->f_count)); mutex_unlock(&ppp_mutex); return err; } @@ -630,7 +630,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (pf->kind != INTERFACE) { /* can't happen */ - printk(KERN_ERR "PPP: not interface or channel??\n"); + pr_err("PPP: not interface or channel??\n"); return -EINVAL; } @@ -704,7 +704,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } vj = slhc_init(val2+1, val+1); if (!vj) { - printk(KERN_ERR "PPP: no memory (VJ compressor)\n"); + netdev_err(ppp->dev, + "PPP: no memory (VJ compressor)\n"); err = -ENOMEM; break; } @@ -898,17 +899,17 @@ static int __init ppp_init(void) { int err; - printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n"); + pr_info("PPP generic driver version " PPP_VERSION "\n"); err = register_pernet_device(&ppp_net_ops); if (err) { - printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err); + pr_err("failed to register PPP pernet device (%d)\n", err); goto out; } err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops); if (err) { - printk(KERN_ERR "failed to register PPP device (%d)\n", err); + pr_err("failed to register PPP device (%d)\n", err); goto out_net; } @@ -1078,7 +1079,7 @@ pad_compress_skb(struct ppp 
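Editor's note: the phy.c change makes SIOCGMIIREG/SIOCSMIIREG honour the phy_id supplied by userspace: phy_read()/phy_write() always target the PHY bound to phydev, whereas mdiobus_read()/mdiobus_write() take an explicit bus address, so any device on the same MDIO bus becomes reachable. A small sketch of the distinction; the helper name is illustrative.

static int example_read_any_phy(struct phy_device *phydev, int addr, int reg)
{
	if (addr == phydev->addr)
		return phy_read(phydev, reg);		/* the attached PHY */

	return mdiobus_read(phydev->bus, addr, reg);	/* any bus address */
}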
*ppp, struct sk_buff *skb) new_skb = alloc_skb(new_skb_size, GFP_ATOMIC); if (!new_skb) { if (net_ratelimit()) - printk(KERN_ERR "PPP: no memory (comp pkt)\n"); + netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n"); return NULL; } if (ppp->dev->hard_header_len > PPP_HDRLEN) @@ -1108,7 +1109,7 @@ pad_compress_skb(struct ppp *ppp, struct sk_buff *skb) * the same number. */ if (net_ratelimit()) - printk(KERN_ERR "ppp: compressor dropped pkt\n"); + netdev_err(ppp->dev, "ppp: compressor dropped pkt\n"); kfree_skb(skb); kfree_skb(new_skb); new_skb = NULL; @@ -1138,7 +1139,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) if (ppp->pass_filter && sk_run_filter(skb, ppp->pass_filter) == 0) { if (ppp->debug & 1) - printk(KERN_DEBUG "PPP: outbound frame not passed\n"); + netdev_printk(KERN_DEBUG, ppp->dev, + "PPP: outbound frame " + "not passed\n"); kfree_skb(skb); return; } @@ -1164,7 +1167,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2, GFP_ATOMIC); if (!new_skb) { - printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n"); + netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n"); goto drop; } skb_reserve(new_skb, ppp->dev->hard_header_len - 2); @@ -1202,7 +1205,9 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) proto != PPP_LCP && proto != PPP_CCP) { if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) { if (net_ratelimit()) - printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n"); + netdev_err(ppp->dev, + "ppp: compression required but " + "down - pkt dropped.\n"); goto drop; } skb = pad_compress_skb(ppp, skb); @@ -1505,7 +1510,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) noskb: spin_unlock_bh(&pch->downl); if (ppp->debug & 1) - printk(KERN_ERR "PPP: no memory (fragment)\n"); + netdev_err(ppp->dev, "PPP: no memory (fragment)\n"); ++ppp->dev->stats.tx_errors; ++ppp->nxseq; return 1; /* abandon the frame */ @@ -1686,7 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) /* copy to a new sk_buff with more tailroom */ ns = dev_alloc_skb(skb->len + 128); if (!ns) { - printk(KERN_ERR"PPP: no memory (VJ decomp)\n"); + netdev_err(ppp->dev, "PPP: no memory " + "(VJ decomp)\n"); goto err; } skb_reserve(ns, 2); @@ -1699,7 +1705,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2); if (len <= 0) { - printk(KERN_DEBUG "PPP: VJ decompression error\n"); + netdev_printk(KERN_DEBUG, ppp->dev, + "PPP: VJ decompression error\n"); goto err; } len += 2; @@ -1721,7 +1728,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) goto err; if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) { - printk(KERN_ERR "PPP: VJ uncompressed error\n"); + netdev_err(ppp->dev, "PPP: VJ uncompressed error\n"); goto err; } proto = PPP_IP; @@ -1762,8 +1769,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) if (ppp->pass_filter && sk_run_filter(skb, ppp->pass_filter) == 0) { if (ppp->debug & 1) - printk(KERN_DEBUG "PPP: inbound frame " - "not passed\n"); + netdev_printk(KERN_DEBUG, ppp->dev, + "PPP: inbound frame " + "not passed\n"); kfree_skb(skb); return; } @@ -1821,7 +1829,8 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) ns = dev_alloc_skb(obuff_size); if (!ns) { - printk(KERN_ERR "ppp_decompress_frame: no memory\n"); + netdev_err(ppp->dev, "ppp_decompress_frame: " + "no memory\n"); goto err; } /* the decompressor still expects the A/C bytes in the hdr */ @@ 
-1989,7 +1998,7 @@ ppp_mp_reconstruct(struct ppp *ppp) u32 seq = ppp->nextseq; u32 minseq = ppp->minseq; struct sk_buff_head *list = &ppp->mrq; - struct sk_buff *p, *next; + struct sk_buff *p, *tmp; struct sk_buff *head, *tail; struct sk_buff *skb = NULL; int lost = 0, len = 0; @@ -1998,13 +2007,15 @@ ppp_mp_reconstruct(struct ppp *ppp) return NULL; head = list->next; tail = NULL; - for (p = head; p != (struct sk_buff *) list; p = next) { - next = p->next; + skb_queue_walk_safe(list, p, tmp) { + again: if (seq_before(PPP_MP_CB(p)->sequence, seq)) { /* this can't happen, anyway ignore the skb */ - printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n", - PPP_MP_CB(p)->sequence, seq); - head = next; + netdev_err(ppp->dev, "ppp_mp_reconstruct bad " + "seq %u < %u\n", + PPP_MP_CB(p)->sequence, seq); + __skb_unlink(p, list); + kfree_skb(p); continue; } if (PPP_MP_CB(p)->sequence != seq) { @@ -2016,8 +2027,7 @@ ppp_mp_reconstruct(struct ppp *ppp) lost = 1; seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? minseq + 1: PPP_MP_CB(p)->sequence; - next = p; - continue; + goto again; } /* @@ -2042,17 +2052,9 @@ ppp_mp_reconstruct(struct ppp *ppp) (PPP_MP_CB(head)->BEbits & B)) { if (len > ppp->mrru + 2) { ++ppp->dev->stats.rx_length_errors; - printk(KERN_DEBUG "PPP: reconstructed packet" - " is too long (%d)\n", len); - } else if (p == head) { - /* fragment is complete packet - reuse skb */ - tail = p; - skb = skb_get(p); - break; - } else if ((skb = dev_alloc_skb(len)) == NULL) { - ++ppp->dev->stats.rx_missed_errors; - printk(KERN_DEBUG "PPP: no memory for " - "reconstructed packet"); + netdev_printk(KERN_DEBUG, ppp->dev, + "PPP: reconstructed packet" + " is too long (%d)\n", len); } else { tail = p; break; @@ -2065,9 +2067,17 @@ ppp_mp_reconstruct(struct ppp *ppp) * and we haven't found a complete valid packet yet, * we can discard up to and including this fragment. */ - if (PPP_MP_CB(p)->BEbits & E) - head = next; + if (PPP_MP_CB(p)->BEbits & E) { + struct sk_buff *tmp2; + skb_queue_reverse_walk_from_safe(list, p, tmp2) { + __skb_unlink(p, list); + kfree_skb(p); + } + head = skb_peek(list); + if (!head) + break; + } ++seq; } @@ -2077,26 +2087,37 @@ ppp_mp_reconstruct(struct ppp *ppp) signal a receive error. */ if (PPP_MP_CB(head)->sequence != ppp->nextseq) { if (ppp->debug & 1) - printk(KERN_DEBUG " missed pkts %u..%u\n", - ppp->nextseq, - PPP_MP_CB(head)->sequence-1); + netdev_printk(KERN_DEBUG, ppp->dev, + " missed pkts %u..%u\n", + ppp->nextseq, + PPP_MP_CB(head)->sequence-1); ++ppp->dev->stats.rx_dropped; ppp_receive_error(ppp); } - if (head != tail) - /* copy to a single skb */ - for (p = head; p != tail->next; p = p->next) - skb_copy_bits(p, 0, skb_put(skb, p->len), p->len); - ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; - head = tail->next; - } + skb = head; + if (head != tail) { + struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list; + p = skb_queue_next(list, head); + __skb_unlink(skb, list); + skb_queue_walk_from_safe(list, p, tmp) { + __skb_unlink(p, list); + *fragpp = p; + p->next = NULL; + fragpp = &p->next; + + skb->len += p->len; + skb->data_len += p->len; + skb->truesize += p->len; + + if (p == tail) + break; + } + } else { + __skb_unlink(skb, list); + } - /* Discard all the skbuffs that we have copied the data out of - or that we can't use. 
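Editor's note: instead of copying every fragment into one freshly allocated skb, the reworked ppp_mp_reconstruct() above unlinks the fragments and chains them onto the head skb's frag_list, adjusting len/data_len/truesize as it goes. A minimal sketch of that chaining step, assuming the caller starts with fragpp = &skb_shinfo(head)->frag_list and advances it per fragment exactly as the loop does.

static void example_chain_frag(struct sk_buff *head, struct sk_buff ***fragpp,
			       struct sk_buff *frag)
{
	**fragpp = frag;		/* link at the current end of the chain */
	frag->next = NULL;
	*fragpp = &frag->next;

	head->len += frag->len;		/* total payload grows... */
	head->data_len += frag->len;	/* ...entirely as non-linear data */
	head->truesize += frag->len;	/* keep memory accounting honest */
}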
*/ - while ((p = list->next) != head) { - __skb_unlink(p, list); - kfree_skb(p); + ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; } return skb; @@ -2617,8 +2638,8 @@ ppp_create_interface(struct net *net, int unit, int *retp) ret = register_netdev(dev); if (ret != 0) { unit_put(&pn->units_idr, unit); - printk(KERN_ERR "PPP: couldn't register device %s (%d)\n", - dev->name, ret); + netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", + dev->name, ret); goto out2; } @@ -2690,9 +2711,9 @@ static void ppp_destroy_interface(struct ppp *ppp) if (!ppp->file.dead || ppp->n_channels) { /* "can't happen" */ - printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d " - "n_channels=%d !\n", ppp, ppp->file.dead, - ppp->n_channels); + netdev_err(ppp->dev, "ppp: destroying ppp struct %p " + "but dead=%d n_channels=%d !\n", + ppp, ppp->file.dead, ppp->n_channels); return; } @@ -2834,8 +2855,7 @@ static void ppp_destroy_channel(struct channel *pch) if (!pch->file.dead) { /* "can't happen" */ - printk(KERN_ERR "ppp: destroying undead channel %p !\n", - pch); + pr_err("ppp: destroying undead channel %p !\n", pch); return; } skb_queue_purge(&pch->file.xq); @@ -2847,7 +2867,7 @@ static void __exit ppp_cleanup(void) { /* should never happen */ if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) - printk(KERN_ERR "PPP: removing module but units remain!\n"); + pr_err("PPP: removing module but units remain!\n"); unregister_chrdev(PPP_MAJOR, "ppp"); device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); class_destroy(ppp_class); @@ -2865,7 +2885,7 @@ static int __unit_alloc(struct idr *p, void *ptr, int n) again: if (!idr_pre_get(p, GFP_KERNEL)) { - printk(KERN_ERR "PPP: No free memory for idr\n"); + pr_err("PPP: No free memory for idr\n"); return -ENOMEM; } diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index 164cfad6ce79..51dfcf8023c7 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c @@ -175,7 +175,6 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) struct pptp_opt *opt = &po->proto.pptp; struct pptp_gre_header *hdr; unsigned int header_len = sizeof(*hdr); - int err = 0; int islcp; int len; unsigned char *data; @@ -190,18 +189,14 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (sk_pppox(po)->sk_state & PPPOX_DEAD) goto tx_error; - { - struct flowi fl = { .oif = 0, - .nl_u = { - .ip4_u = { - .daddr = opt->dst_addr.sin_addr.s_addr, - .saddr = opt->src_addr.sin_addr.s_addr, - .tos = RT_TOS(0) } }, - .proto = IPPROTO_GRE }; - err = ip_route_output_key(&init_net, &rt, &fl); - if (err) - goto tx_error; - } + rt = ip_route_output_ports(&init_net, NULL, + opt->dst_addr.sin_addr.s_addr, + opt->src_addr.sin_addr.s_addr, + 0, 0, IPPROTO_GRE, + RT_TOS(0), 0); + if (IS_ERR(rt)) + goto tx_error; + tdev = rt->dst.dev; max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2; @@ -468,21 +463,17 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pptp_chan_ops; - { - struct flowi fl = { - .nl_u = { - .ip4_u = { - .daddr = opt->dst_addr.sin_addr.s_addr, - .saddr = opt->src_addr.sin_addr.s_addr, - .tos = RT_CONN_FLAGS(sk) } }, - .proto = IPPROTO_GRE }; - security_sk_classify_flow(sk, &fl); - if (ip_route_output_key(&init_net, &rt, &fl)) { - error = -EHOSTUNREACH; - goto end; - } - sk_setup_caps(sk, &rt->dst); + rt = ip_route_output_ports(&init_net, sk, + opt->dst_addr.sin_addr.s_addr, + opt->src_addr.sin_addr.s_addr, + 0, 0, + IPPROTO_GRE, RT_CONN_FLAGS(sk), 0); + if 
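Editor's note: both pptp paths drop the hand-built struct flowi in favour of ip_route_output_ports(), which (in this kernel generation) returns the rtable directly or an ERR_PTR() on failure, so callers test with IS_ERR() instead of checking an error code. A stripped-down sketch of the lookup as used for the GRE tunnel; the wrapper name is illustrative.

static struct rtable *example_route_gre(__be32 daddr, __be32 saddr)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, NULL, daddr, saddr,
				   0, 0, IPPROTO_GRE, RT_TOS(0), 0);
	if (IS_ERR(rt))
		return NULL;	/* no route: the caller drops the packet */

	return rt;
}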
(IS_ERR(rt)) { + error = -EHOSTUNREACH; + goto end; } + sk_setup_caps(sk, &rt->dst); + po->chan.mtu = dst_mtu(&rt->dst); if (!po->chan.mtu) po->chan.mtu = PPP_MTU; diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 1a3584edd79c..2d21c60085bc 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -379,7 +379,7 @@ static void fm93c56a_select(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); @@ -398,7 +398,7 @@ static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) u32 previousBit; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; /* Clock in a zero, then do the start bit */ ql_write_nvram_reg(qdev, spir, @@ -467,7 +467,7 @@ static void fm93c56a_deselect(struct ql3_adapter *qdev) { struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); @@ -483,7 +483,7 @@ static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) u32 dataBit; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; /* Read the data bits */ /* The first bit is a dummy. Clock right over it. 
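Editor's note: the qla3xxx hunks are pure sparse hygiene: the serialPortInterfaceReg pointer is MMIO, so it must carry the __iomem qualifier and only ever be accessed through readl()/writel() (or the driver's ql_read/ql_write wrappers), never dereferenced directly. A tiny sketch of the annotation and access pattern.

#include <linux/io.h>

static u32 example_read_reg(void __iomem *base, unsigned int offset)
{
	u32 __iomem *reg = base + offset;	/* annotated MMIO pointer */

	return readl(reg);			/* no direct dereference */
}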
*/ @@ -3011,7 +3011,7 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev) u32 value; struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers; - u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; + __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; struct ql3xxx_host_memory_registers __iomem *hmem_regs = (void __iomem *)port_regs; u32 delay = 10; diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h index 44e316fd67b8..dc44564ef6f9 100644 --- a/drivers/net/qlcnic/qlcnic.h +++ b/drivers/net/qlcnic/qlcnic.h @@ -867,7 +867,6 @@ struct qlcnic_nic_intr_coalesce { #define LINKEVENT_LINKSPEED_MBPS 0 #define LINKEVENT_LINKSPEED_ENCODED 1 -#define AUTO_FW_RESET_ENABLED 0x01 /* firmware response header: * 63:58 - message type * 57:56 - owner @@ -1133,14 +1132,10 @@ struct qlcnic_eswitch { #define MAX_BW 100 /* % of link speed */ #define MAX_VLAN_ID 4095 #define MIN_VLAN_ID 2 -#define MAX_TX_QUEUES 1 -#define MAX_RX_QUEUES 4 #define DEFAULT_MAC_LEARN 1 #define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) #define IS_VALID_BW(bw) (bw <= MAX_BW) -#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) -#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) struct qlcnic_pci_func_cfg { u16 func_type; diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 37c04b4fade3..cd88c7e1bfa9 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c @@ -42,7 +42,7 @@ static int use_msi_x = 1; module_param(use_msi_x, int, 0444); MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); -static int auto_fw_reset = AUTO_FW_RESET_ENABLED; +static int auto_fw_reset = 1; module_param(auto_fw_reset, int, 0644); MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); @@ -2959,8 +2959,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) if (adapter->need_fw_reset) goto detach; - if (adapter->reset_context && - auto_fw_reset == AUTO_FW_RESET_ENABLED) { + if (adapter->reset_context && auto_fw_reset) { qlcnic_reset_hw_context(adapter); adapter->netdev->trans_start = jiffies; } @@ -2973,7 +2972,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) qlcnic_dev_request_reset(adapter); - if ((auto_fw_reset == AUTO_FW_RESET_ENABLED)) + if (auto_fw_reset) clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state); dev_info(&netdev->dev, "firmware hang detected\n"); @@ -2982,7 +2981,7 @@ detach: adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? 
state : QLCNIC_DEV_NEED_RESET; - if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && + if (auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) { qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); @@ -3654,10 +3653,8 @@ validate_npar_config(struct qlcnic_adapter *adapter, if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC) return QL_STATUS_INVALID_PARAM; - if (!IS_VALID_BW(np_cfg[i].min_bw) - || !IS_VALID_BW(np_cfg[i].max_bw) - || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues) - || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues)) + if (!IS_VALID_BW(np_cfg[i].min_bw) || + !IS_VALID_BW(np_cfg[i].max_bw)) return QL_STATUS_INVALID_PARAM; } return 0; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 7ffdb80adf40..5e403511289d 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -37,6 +37,7 @@ #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" #ifdef RTL8169_DEBUG #define assert(expr) \ @@ -124,6 +125,8 @@ enum mac_version { RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP + RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E + RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E }; #define _R(NAME,MAC,MASK) \ @@ -161,7 +164,9 @@ static const struct { _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E - _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880) // PCI-E + _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E + _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E + _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880) // PCI-E }; #undef _R @@ -268,9 +273,15 @@ enum rtl8168_8101_registers { #define EPHYAR_REG_MASK 0x1f #define EPHYAR_REG_SHIFT 16 #define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PM_SWITCH (1 << 6) DBG_REG = 0xd1, #define FIX_NAK_1 (1 << 4) #define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) EFUSEAR = 0xdc, #define EFUSEAR_FLAG 0x80000000 #define EFUSEAR_WRITE_CMD 0x80000000 @@ -527,9 +538,6 @@ struct rtl8169_private { u16 napi_event; u16 intr_mask; int phy_1000_ctrl_reg; -#ifdef CONFIG_R8169_VLAN - struct vlan_group *vlgrp; -#endif struct mdio_ops { void (*write)(void __iomem *, int, int); @@ -541,7 +549,7 @@ struct rtl8169_private { void (*up)(struct rtl8169_private *); } pll_power_ops; - int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); int (*get_settings)(struct net_device *, struct ethtool_cmd *); void (*phy_reset_enable)(struct rtl8169_private *tp); void (*hw_start)(struct net_device *); @@ -569,6 +577,7 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(RTL8169_VERSION); MODULE_FIRMWARE(FIRMWARE_8168D_1); MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8105E_1); static int rtl8169_open(struct net_device *dev); static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, @@ -1098,7 +1107,7 @@ static int rtl8169_get_regs_len(struct net_device *dev) } static int rtl8169_set_speed_tbi(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex) + u8 autoneg, u16 speed, u8 duplex, u32 ignored) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; @@ -1121,17 +1130,30 @@ static int rtl8169_set_speed_tbi(struct net_device *dev, } static int 
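Editor's note: RTL8105E support brings a new firmware blob, declared with MODULE_FIRMWARE() so userspace tooling knows to ship rtl8105e-1.fw. A hedged sketch of the request/release pattern that a helper such as rtl_apply_firmware() wraps; parsing of the blob is chip-specific and omitted here.

#include <linux/firmware.h>

static int example_load_fw(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, name, dev);
	if (rc < 0)
		return rc;	/* typically: file missing from /lib/firmware */

	/* ... interpret fw->data / fw->size and program the PHY ... */

	release_firmware(fw);
	return 0;
}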
rtl8169_set_speed_xmii(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex) + u8 autoneg, u16 speed, u8 duplex, u32 adv) { struct rtl8169_private *tp = netdev_priv(dev); int giga_ctrl, bmcr; + int rc = -EINVAL; + + rtl_writephy(tp, 0x1f, 0x0000); if (autoneg == AUTONEG_ENABLE) { int auto_nego; auto_nego = rtl_readphy(tp, MII_ADVERTISE); - auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL | - ADVERTISE_100HALF | ADVERTISE_100FULL); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; giga_ctrl = rtl_readphy(tp, MII_CTRL1000); @@ -1145,27 +1167,22 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, (tp->mac_version != RTL_GIGA_MAC_VER_13) && (tp->mac_version != RTL_GIGA_MAC_VER_14) && (tp->mac_version != RTL_GIGA_MAC_VER_15) && - (tp->mac_version != RTL_GIGA_MAC_VER_16)) { - giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF; - } else { + (tp->mac_version != RTL_GIGA_MAC_VER_16) && + (tp->mac_version != RTL_GIGA_MAC_VER_29) && + (tp->mac_version != RTL_GIGA_MAC_VER_30)) { + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + } else if (adv & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) { netif_info(tp, link, dev, "PHY does not support 1000Mbps\n"); + goto out; } bmcr = BMCR_ANENABLE | BMCR_ANRESTART; - if ((tp->mac_version == RTL_GIGA_MAC_VER_11) || - (tp->mac_version == RTL_GIGA_MAC_VER_12) || - (tp->mac_version >= RTL_GIGA_MAC_VER_17)) { - /* - * Wake up the PHY. - * Vendor specific (0x1f) and reserved (0x0e) MII - * registers. 
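Editor's note: rtl8169_set_speed_xmii() now clears the 10/100 advertisement bits and rebuilds them from the ethtool ADVERTISED_* mask instead of unconditionally advertising everything. The mapping it performs by hand looks like the sketch below (newer kernels offer ethtool_adv_to_mii_adv_t() for the same job); pause advertisement stays unconditional, as in the patch.

static u16 example_adv_to_mii(u32 adv)
{
	u16 mii = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

	if (adv & ADVERTISED_10baseT_Half)
		mii |= ADVERTISE_10HALF;
	if (adv & ADVERTISED_10baseT_Full)
		mii |= ADVERTISE_10FULL;
	if (adv & ADVERTISED_100baseT_Half)
		mii |= ADVERTISE_100HALF;
	if (adv & ADVERTISED_100baseT_Full)
		mii |= ADVERTISE_100FULL;

	return mii;
}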
- */ - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, 0x0e, 0x0000); - } - rtl_writephy(tp, MII_ADVERTISE, auto_nego); rtl_writephy(tp, MII_CTRL1000, giga_ctrl); } else { @@ -1176,12 +1193,10 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, else if (speed == SPEED_100) bmcr = BMCR_SPEED100; else - return -EINVAL; + goto out; if (duplex == DUPLEX_FULL) bmcr |= BMCR_FULLDPLX; - - rtl_writephy(tp, 0x1f, 0x0000); } tp->phy_1000_ctrl_reg = giga_ctrl; @@ -1199,16 +1214,18 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, } } - return 0; + rc = 0; +out: + return rc; } static int rtl8169_set_speed(struct net_device *dev, - u8 autoneg, u16 speed, u8 duplex) + u8 autoneg, u16 speed, u8 duplex, u32 advertising) { struct rtl8169_private *tp = netdev_priv(dev); int ret; - ret = tp->set_speed(dev, autoneg, speed, duplex); + ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)) mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); @@ -1223,7 +1240,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) int ret; spin_lock_irqsave(&tp->lock, flags); - ret = rtl8169_set_speed(dev, cmd->autoneg, cmd->speed, cmd->duplex); + ret = rtl8169_set_speed(dev, + cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising); spin_unlock_irqrestore(&tp->lock, flags); return ret; @@ -1257,8 +1275,6 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data) return 0; } -#ifdef CONFIG_R8169_VLAN - static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, struct sk_buff *skb) { @@ -1266,64 +1282,37 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; } -static void rtl8169_vlan_rx_register(struct net_device *dev, - struct vlan_group *grp) +#define NETIF_F_HW_VLAN_TX_RX (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX) + +static void rtl8169_vlan_mode(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned long flags; spin_lock_irqsave(&tp->lock, flags); - tp->vlgrp = grp; - /* - * Do not disable RxVlan on 8110SCd. 
- */ - if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05)) + if (dev->features & NETIF_F_HW_VLAN_RX) tp->cp_cmd |= RxVlan; else tp->cp_cmd &= ~RxVlan; RTL_W16(CPlusCmd, tp->cp_cmd); + /* PCI commit */ RTL_R16(CPlusCmd); spin_unlock_irqrestore(&tp->lock, flags); + + dev->vlan_features = dev->features &~ NETIF_F_HW_VLAN_TX_RX; } -static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, - struct sk_buff *skb, int polling) +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) { u32 opts2 = le32_to_cpu(desc->opts2); - struct vlan_group *vlgrp = tp->vlgrp; - int ret; - if (vlgrp && (opts2 & RxVlanTag)) { - u16 vtag = swab16(opts2 & 0xffff); + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); - if (likely(polling)) - vlan_gro_receive(&tp->napi, vlgrp, vtag, skb); - else - __vlan_hwaccel_rx(skb, vlgrp, vtag, polling); - ret = 0; - } else - ret = -1; desc->opts2 = 0; - return ret; } -#else /* !CONFIG_R8169_VLAN */ - -static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp, - struct sk_buff *skb) -{ - return 0; -} - -static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, - struct sk_buff *skb, int polling) -{ - return -1; -} - -#endif - static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) { struct rtl8169_private *tp = netdev_priv(dev); @@ -1494,6 +1483,28 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) } } +static int rtl8169_set_flags(struct net_device *dev, u32 data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned long old_feat = dev->features; + int rc; + + if ((tp->mac_version == RTL_GIGA_MAC_VER_05) && + !(data & ETH_FLAG_RXVLAN)) { + netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n"); + return -EINVAL; + } + + rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN); + if (rc) + return rc; + + if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) + rtl8169_vlan_mode(dev); + + return 0; +} + static const struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, .get_regs_len = rtl8169_get_regs_len, @@ -1513,6 +1524,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_strings = rtl8169_get_strings, .get_sset_count = rtl8169_get_sset_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, + .set_flags = rtl8169_set_flags, + .get_flags = ethtool_op_get_flags, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, @@ -1561,6 +1574,9 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, /* 8101 family. 
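Editor's note: the driver-private vlan_group handling is gone; on receive, the tag extracted from the descriptor is attached with __vlan_hwaccel_put_tag() (two-argument form in this kernel) and the skb flows through the usual GRO/netif_rx path, while VLAN stripping itself is toggled through the new ethtool set_flags hook. A minimal sketch of the receive-side idiom; the completion helper is illustrative.

#include <linux/if_vlan.h>

static void example_rx_complete(struct napi_struct *napi, struct sk_buff *skb,
				u16 vlan_tci, bool tagged, bool polling)
{
	if (tagged)
		__vlan_hwaccel_put_tag(skb, vlan_tci);

	if (polling)
		napi_gro_receive(napi, skb);
	else
		netif_rx(skb);
}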
*/ + { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, + { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, @@ -2437,6 +2453,33 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); } +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0005 }, + { 0x1a, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0004 }, + { 0x1c, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x15, 0x7701 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0) + netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n"); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + static void rtl_hw_phy_config(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -2504,6 +2547,10 @@ static void rtl_hw_phy_config(struct net_device *dev) case RTL_GIGA_MAC_VER_28: rtl8168d_4_hw_phy_config(tp); break; + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + rtl8105e_hw_phy_config(tp); + break; default: break; @@ -2635,11 +2682,12 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) rtl8169_phy_reset(dev, tp); - /* - * rtl8169_set_speed_xmii takes good care of the Fast Ethernet - * only 8101. Don't panic. - */ - rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL); + rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); if (RTL_R8(PHYstatus) & TBI_Enable) netif_info(tp, link, dev, "TBI auto-negotiating\n"); @@ -2795,9 +2843,6 @@ static const struct net_device_ops rtl8169_netdev_ops = { .ndo_set_mac_address = rtl_set_mac_address, .ndo_do_ioctl = rtl8169_ioctl, .ndo_set_multicast_list = rtl_set_rx_mode, -#ifdef CONFIG_R8169_VLAN - .ndo_vlan_rx_register = rtl8169_vlan_rx_register, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = rtl8169_netpoll, #endif @@ -2952,6 +2997,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_09: case RTL_GIGA_MAC_VER_10: case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: ops->down = r810x_pll_power_down; ops->up = r810x_pll_power_up; break; @@ -3104,6 +3151,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Identify chip attached to board */ rtl8169_get_mac_version(tp, ioaddr); + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
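Editor's note: rtl8169_init_phy() now restarts autonegotiation with an explicit advertising mask, offering gigabit modes only when the MII layer reports GMII support. When building such a mask inline, the conditional needs its own parentheses, because | binds more tightly than ?: and the ORed 10/100 bits would otherwise be folded into the condition; the sketch below spells the expression out as a helper.

static u32 example_default_advertising(bool supports_gmii)
{
	return ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
	       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
	       (supports_gmii ?
		ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full : 0);
}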
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + tp->cp_cmd |= RxVlan; + rtl_init_mdio_ops(tp); rtl_init_pll_power_ops(tp); @@ -3172,10 +3226,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); -#ifdef CONFIG_R8169_VLAN - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; -#endif - dev->features |= NETIF_F_GRO; + dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO; tp->intr_mask = 0xffff; tp->hw_start = cfg->hw_start; @@ -3293,12 +3344,7 @@ static int rtl8169_open(struct net_device *dev) rtl8169_init_phy(dev, tp); - /* - * Pretend we are using VLANs; This bypasses a nasty bug where - * Interrupts stop flowing on high load on 8110SCd controllers. - */ - if (tp->mac_version == RTL_GIGA_MAC_VER_05) - RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan); + rtl8169_vlan_mode(dev); rtl_pll_power_up(tp); @@ -3915,6 +3961,37 @@ static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) rtl_ephy_write(ioaddr, 0x03, 0xc2f9); } +static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) +{ + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idel */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + /* disable Early Tally Counter */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000); + + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PM_SWITCH); + + rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); +} + +static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev) +{ + rtl_hw_start_8105e_1(ioaddr, pdev); + rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); +} + static void rtl_hw_start_8101(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -3945,6 +4022,13 @@ static void rtl_hw_start_8101(struct net_device *dev) case RTL_GIGA_MAC_VER_09: rtl_hw_start_8102e_2(ioaddr, pdev); break; + + case RTL_GIGA_MAC_VER_29: + rtl_hw_start_8105e_1(ioaddr, pdev); + break; + case RTL_GIGA_MAC_VER_30: + rtl_hw_start_8105e_2(ioaddr, pdev); + break; } RTL_W8(Cfg9346, Cfg9346_Lock); @@ -4603,12 +4687,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev, skb_put(skb, pkt_size); skb->protocol = eth_type_trans(skb, dev); - if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) { - if (likely(polling)) - napi_gro_receive(&tp->napi, skb); - else - netif_rx(skb); - } + rtl8169_rx_vlan_tag(desc, skb); + + if (likely(polling)) + napi_gro_receive(&tp->napi, skb); + else + netif_rx(skb); dev->stats.rx_bytes += pkt_size; dev->stats.rx_packets++; diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 39c17cecb8b9..2ad6364103ea 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -7556,7 +7556,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) */ skb->ip_summed = CHECKSUM_UNNECESSARY; if (ring_data->lro) { - u32 tcp_len; + u32 tcp_len = 0; u8 *tcp; int ret = 0; diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 002bac743843..b8bd936374f2 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. 
- * Copyright 2005-2009 Solarflare Communications Inc. + * Copyright 2005-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -21,6 +21,7 @@ #include <linux/ethtool.h> #include <linux/topology.h> #include <linux/gfp.h> +#include <linux/cpu_rmap.h> #include "net_driver.h" #include "efx.h" #include "nic.h" @@ -307,6 +308,8 @@ static int efx_poll(struct napi_struct *napi, int budget) channel->irq_mod_score = 0; } + efx_filter_rfs_expire(channel); + /* There is no race here; although napi_disable() will * only wait for napi_complete(), this isn't a problem * since efx_channel_processed() will have no effect if @@ -673,7 +676,7 @@ static void efx_fini_channels(struct efx_nic *efx) efx_for_each_channel_rx_queue(rx_queue, channel) efx_fini_rx_queue(rx_queue); - efx_for_each_channel_tx_queue(tx_queue, channel) + efx_for_each_possible_channel_tx_queue(tx_queue, channel) efx_fini_tx_queue(tx_queue); efx_fini_eventq(channel); } @@ -689,7 +692,7 @@ static void efx_remove_channel(struct efx_channel *channel) efx_for_each_channel_rx_queue(rx_queue, channel) efx_remove_rx_queue(rx_queue); - efx_for_each_channel_tx_queue(tx_queue, channel) + efx_for_each_possible_channel_tx_queue(tx_queue, channel) efx_remove_tx_queue(tx_queue); efx_remove_eventq(channel); } @@ -1101,8 +1104,8 @@ static int efx_init_io(struct efx_nic *efx) rc = -EIO; goto fail3; } - efx->membase = ioremap_nocache(efx->membase_phys, - efx->type->mem_map_size); + efx->membase = ioremap_wc(efx->membase_phys, + efx->type->mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, "could not map memory BAR at %llx+%x\n", @@ -1175,10 +1178,32 @@ static int efx_wanted_channels(void) return count; } +static int +efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries) +{ +#ifdef CONFIG_RFS_ACCEL + int i, rc; + + efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels); + if (!efx->net_dev->rx_cpu_rmap) + return -ENOMEM; + for (i = 0; i < efx->n_rx_channels; i++) { + rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, + xentries[i].vector); + if (rc) { + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; + return rc; + } + } +#endif + return 0; +} + /* Probe the number and type of interrupts we are able to obtain, and * the resulting numbers of channels and RX queues. */ -static void efx_probe_interrupts(struct efx_nic *efx) +static int efx_probe_interrupts(struct efx_nic *efx) { int max_channels = min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); @@ -1220,6 +1245,11 @@ static void efx_probe_interrupts(struct efx_nic *efx) efx->n_tx_channels = efx->n_channels; efx->n_rx_channels = efx->n_channels; } + rc = efx_init_rx_cpu_rmap(efx, xentries); + if (rc) { + pci_disable_msix(efx->pci_dev); + return rc; + } for (i = 0; i < n_channels; i++) efx_get_channel(efx, i)->irq = xentries[i].vector; @@ -1253,6 +1283,8 @@ static void efx_probe_interrupts(struct efx_nic *efx) efx->n_tx_channels = 1; efx->legacy_irq = efx->pci_dev->irq; } + + return 0; } static void efx_remove_interrupts(struct efx_nic *efx) @@ -1271,21 +1303,8 @@ static void efx_remove_interrupts(struct efx_nic *efx) static void efx_set_channels(struct efx_nic *efx) { - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; - efx->tx_channel_offset = separate_tx_channels ? 
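Editor's note: the sfc changes wire the driver into accelerated RFS: a CPU affinity reverse map sized to the RX channels is allocated, each MSI-X vector is added to it, and it is hung off net_dev->rx_cpu_rmap so the core can call ndo_rx_flow_steer with sensible queue choices. A condensed sketch of that setup, restating the CONFIG_RFS_ACCEL-guarded helper added above; names are illustrative.

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>

static int example_init_rx_rmap(struct net_device *net_dev,
				struct msix_entry *xentries,
				unsigned int n_rx_channels)
{
	unsigned int i;
	int rc;

	net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(n_rx_channels);
	if (!net_dev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < n_rx_channels; i++) {
		rc = irq_cpu_rmap_add(net_dev->rx_cpu_rmap,
				      xentries[i].vector);
		if (rc) {
			free_irq_cpu_rmap(net_dev->rx_cpu_rmap);
			net_dev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
	return 0;
}
#endif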
efx->n_channels - efx->n_tx_channels : 0; - - /* Channel pointers were set in efx_init_struct() but we now - * need to clear them for TX queues in any RX-only channels. */ - efx_for_each_channel(channel, efx) { - if (channel->channel - efx->tx_channel_offset >= - efx->n_tx_channels) { - efx_for_each_channel_tx_queue(tx_queue, channel) - tx_queue->channel = NULL; - } - } } static int efx_probe_nic(struct efx_nic *efx) @@ -1302,7 +1321,9 @@ static int efx_probe_nic(struct efx_nic *efx) /* Determine the number of channels and queues by trying to hook * in MSI-X interrupts. */ - efx_probe_interrupts(efx); + rc = efx_probe_interrupts(efx); + if (rc) + goto fail; if (efx->n_channels > 1) get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); @@ -1317,6 +1338,10 @@ static int efx_probe_nic(struct efx_nic *efx) efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); return 0; + +fail: + efx->type->remove(efx); + return rc; } static void efx_remove_nic(struct efx_nic *efx) @@ -1531,9 +1556,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, efx->irq_rx_adaptive = rx_adaptive; efx->irq_rx_moderation = rx_ticks; efx_for_each_channel(channel, efx) { - if (efx_channel_get_rx_queue(channel)) + if (efx_channel_has_rx_queue(channel)) channel->irq_moderation = rx_ticks; - else if (efx_channel_get_tx_queue(channel, 0)) + else if (efx_channel_has_tx_queues(channel)) channel->irq_moderation = tx_ticks; } } @@ -1849,6 +1874,10 @@ static const struct net_device_ops efx_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = efx_netpoll, #endif + .ndo_setup_tc = efx_setup_tc, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif }; static void efx_update_name(struct efx_nic *efx) @@ -1910,10 +1939,8 @@ static int efx_register_netdev(struct efx_nic *efx) efx_for_each_channel(channel, efx) { struct efx_tx_queue *tx_queue; - efx_for_each_channel_tx_queue(tx_queue, channel) { - tx_queue->core_txq = netdev_get_tx_queue( - efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES); - } + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_init_tx_queue_core_txq(tx_queue); } /* Always start with carrier off; PHY events will detect the link */ @@ -2288,6 +2315,10 @@ static void efx_fini_struct(struct efx_nic *efx) */ static void efx_pci_remove_main(struct efx_nic *efx) { +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); + efx->net_dev->rx_cpu_rmap = NULL; +#endif efx_nic_fini_interrupt(efx); efx_fini_channels(efx); efx_fini_port(efx); @@ -2401,7 +2432,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, int i, rc; /* Allocate and initialise a struct net_device and struct efx_nic */ - net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES); + net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, + EFX_MAX_RX_QUEUES); if (!net_dev) return -ENOMEM; net_dev->features |= (type->offload_features | NETIF_F_SG | diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index d43a7e5212b1..3d83a1f74fef 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. 
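Editor's note: efx_pci_probe() switches to alloc_etherdev_mqs(), which lets the TX and RX subqueue counts differ instead of forcing both to the single value alloc_etherdev_mq() accepts. A one-line sketch of the call shape; the queue counts are illustrative, not sfc's.

static struct net_device *example_alloc_netdev(size_t priv_size)
{
	/* 16 TX queues, 4 RX queues - purely illustrative numbers */
	return alloc_etherdev_mqs(priv_size, 16, 4);
}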
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -29,6 +29,7 @@ extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); +extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); extern netdev_tx_t @@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); +extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); /* RX */ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); @@ -74,6 +76,21 @@ extern int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec); extern void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority); +#ifdef CONFIG_RFS_ACCEL +extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota); +static inline void efx_filter_rfs_expire(struct efx_channel *channel) +{ + if (channel->rfs_filters_added >= 60 && + __efx_filter_rfs_expire(channel->efx, 100)) + channel->rfs_filters_added -= 60; +} +#define efx_filter_rfs_enabled() 1 +#else +static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} +#define efx_filter_rfs_enabled() 0 +#endif /* Channels */ extern void efx_process_channel_now(struct efx_channel *channel); diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index ca886d98bdc7..807178ef65ad 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -28,7 +28,8 @@ struct efx_ethtool_stat { enum { EFX_ETHTOOL_STAT_SOURCE_mac_stats, EFX_ETHTOOL_STAT_SOURCE_nic, - EFX_ETHTOOL_STAT_SOURCE_channel + EFX_ETHTOOL_STAT_SOURCE_channel, + EFX_ETHTOOL_STAT_SOURCE_tx_queue } source; unsigned offset; u64(*get_stat) (void *field); /* Reader function */ @@ -86,6 +87,10 @@ static u64 efx_get_atomic_stat(void *field) EFX_ETHTOOL_STAT(field, channel, n_##field, \ unsigned int, efx_get_uint_stat) +#define EFX_ETHTOOL_UINT_TXQ_STAT(field) \ + EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \ + unsigned int, efx_get_uint_stat) + static struct efx_ethtool_stat efx_ethtool_stats[] = { EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), @@ -116,6 +121,10 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = { EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers), + EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets), + EFX_ETHTOOL_UINT_TXQ_STAT(pushes), EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), @@ -237,8 +246,8 @@ static void efx_ethtool_get_drvinfo(struct net_device *net_dev, strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) - siena_print_fwver(efx, info->fw_version, - sizeof(info->fw_version)); + efx_mcdi_print_fwver(efx, info->fw_version, + sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); } @@ -470,6 +479,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, struct efx_mac_stats *mac_stats = &efx->mac_stats; struct efx_ethtool_stat *stat; struct efx_channel *channel; + struct efx_tx_queue *tx_queue; struct rtnl_link_stats64 temp; int i; @@ -495,6 +505,15 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, data[i] += stat->get_stat((void *)channel + stat->offset); break; + case EFX_ETHTOOL_STAT_SOURCE_tx_queue: + data[i] = 0; + efx_for_each_channel(channel, efx) { + efx_for_each_channel_tx_queue(tx_queue, channel) + data[i] += + stat->get_stat((void *)tx_queue + + stat->offset); + } + break; } } } @@ -502,7 +521,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev, static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) { struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); - unsigned long features; + u32 features; features = NETIF_F_TSO; if (efx->type->offload_features & NETIF_F_V6_CSUM) @@ -519,7 +538,7 @@ static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) { struct efx_nic *efx = netdev_priv(net_dev); - unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; + u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM; if (enable) net_dev->features |= features; @@ -635,7 +654,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev, /* Find lowest IRQ moderation across all used TX queues */ coalesce->tx_coalesce_usecs_irq = ~((u32) 0); efx_for_each_channel(channel, efx) { - if (!efx_channel_get_tx_queue(channel, 0)) + if (!efx_channel_has_tx_queues(channel)) continue; if 
(channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { if (channel->channel < efx->n_rx_channels) @@ -680,8 +699,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev, /* If the channel is shared only allow RX parameters to be set */ efx_for_each_channel(channel, efx) { - if (efx_channel_get_rx_queue(channel) && - efx_channel_get_tx_queue(channel, 0) && + if (efx_channel_has_rx_queue(channel) && + efx_channel_has_tx_queues(channel) && tx_usecs) { netif_err(efx, drv, efx->net_dev, "Channel is shared. " "Only RX coalescing may be set\n"); diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 61ddd2c6e750..734fcfb52e85 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -1478,36 +1478,26 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) /* RX control FIFO thresholds (32 entries) */ const unsigned ctrl_xon_thr = 20; const unsigned ctrl_xoff_thr = 25; - /* RX data FIFO thresholds (256-byte units; size varies) */ - int data_xon_thr = efx_nic_rx_xon_thresh >> 8; - int data_xoff_thr = efx_nic_rx_xoff_thresh >> 8; efx_oword_t reg; efx_reado(efx, ®, FR_AZ_RX_CFG); if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { /* Data FIFO size is 5.5K */ - if (data_xon_thr < 0) - data_xon_thr = 512 >> 8; - if (data_xoff_thr < 0) - data_xoff_thr = 2048 >> 8; EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, huge_buf_size); - EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr); - EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); } else { /* Data FIFO size is 80K; register fields moved */ - if (data_xon_thr < 0) - data_xon_thr = 27648 >> 8; /* ~3*max MTU */ - if (data_xoff_thr < 0) - data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, huge_buf_size); - EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr); - EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr); + /* Send XON and XOFF at ~3 * max MTU away from empty/full */ + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c index 2dd16f0b3ced..b9cc846811d6 100644 --- a/drivers/net/sfc/falcon_boards.c +++ b/drivers/net/sfc/falcon_boards.c @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2007-2009 Solarflare Communications Inc. + * Copyright 2007-2010 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index b49e84394641..2c9ee5db3bf7 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c index d4722c41c4ce..95a980fd63d5 100644 --- a/drivers/net/sfc/filter.c +++ b/drivers/net/sfc/filter.c @@ -8,6 +8,7 @@ */ #include <linux/in.h> +#include <net/ip.h> #include "efx.h" #include "filter.h" #include "io.h" @@ -27,6 +28,10 @@ */ #define FILTER_CTL_SRCH_MAX 200 +/* Don't try very hard to find space for performance hints, as this is + * counter-productive. */ +#define FILTER_CTL_SRCH_HINT_MAX 5 + enum efx_filter_table_id { EFX_FILTER_TABLE_RX_IP = 0, EFX_FILTER_TABLE_RX_MAC, @@ -47,6 +52,10 @@ struct efx_filter_table { struct efx_filter_state { spinlock_t lock; struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; +#ifdef CONFIG_RFS_ACCEL + u32 *rps_flow_id; + unsigned rps_expire_index; +#endif }; /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit @@ -325,15 +334,16 @@ static int efx_filter_search(struct efx_filter_table *table, struct efx_filter_spec *spec, u32 key, bool for_insert, int *depth_required) { - unsigned hash, incr, filter_idx, depth; + unsigned hash, incr, filter_idx, depth, depth_max; struct efx_filter_spec *cmp; hash = efx_filter_hash(key); incr = efx_filter_increment(key); + depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ? 
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX); for (depth = 1, filter_idx = hash & (table->size - 1); - depth <= FILTER_CTL_SRCH_MAX && - test_bit(filter_idx, table->used_bitmap); + depth <= depth_max && test_bit(filter_idx, table->used_bitmap); ++depth) { cmp = &table->spec[filter_idx]; if (efx_filter_equal(spec, cmp)) @@ -342,7 +352,7 @@ static int efx_filter_search(struct efx_filter_table *table, } if (!for_insert) return -ENOENT; - if (depth > FILTER_CTL_SRCH_MAX) + if (depth > depth_max) return -EBUSY; found: *depth_required = depth; @@ -562,6 +572,13 @@ int efx_probe_filters(struct efx_nic *efx) spin_lock_init(&state->lock); if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { +#ifdef CONFIG_RFS_ACCEL + state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS, + sizeof(*state->rps_flow_id), + GFP_KERNEL); + if (!state->rps_flow_id) + goto fail; +#endif table = &state->table[EFX_FILTER_TABLE_RX_IP]; table->id = EFX_FILTER_TABLE_RX_IP; table->offset = FR_BZ_RX_FILTER_TBL0; @@ -607,5 +624,97 @@ void efx_remove_filters(struct efx_nic *efx) kfree(state->table[table_id].used_bitmap); vfree(state->table[table_id].spec); } +#ifdef CONFIG_RFS_ACCEL + kfree(state->rps_flow_id); +#endif kfree(state); } + +#ifdef CONFIG_RFS_ACCEL + +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + struct efx_filter_state *state = efx->filter_state; + struct efx_filter_spec spec; + const struct iphdr *ip; + const __be16 *ports; + int nhoff; + int rc; + + nhoff = skb_network_offset(skb); + + if (skb->protocol != htons(ETH_P_IP)) + return -EPROTONOSUPPORT; + + /* RFS must validate the IP header length before calling us */ + EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip))); + ip = (const struct iphdr *)(skb->data + nhoff); + if (ip->frag_off & htons(IP_MF | IP_OFFSET)) + return -EPROTONOSUPPORT; + EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4)); + ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index); + rc = efx_filter_set_ipv4_full(&spec, ip->protocol, + ip->daddr, ports[1], ip->saddr, ports[0]); + if (rc) + return rc; + + rc = efx_filter_insert_filter(efx, &spec, true); + if (rc < 0) + return rc; + + /* Remember this so we can check whether to expire the filter later */ + state->rps_flow_id[rc] = flow_id; + channel = efx_get_channel(efx, skb_get_rx_queue(skb)); + ++channel->rfs_filters_added; + + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + (ip->protocol == IPPROTO_TCP) ? 
"TCP" : "UDP", + &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]), + rxq_index, flow_id, rc); + + return rc; +} + +bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota) +{ + struct efx_filter_state *state = efx->filter_state; + struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP]; + unsigned mask = table->size - 1; + unsigned index; + unsigned stop; + + if (!spin_trylock_bh(&state->lock)) + return false; + + index = state->rps_expire_index; + stop = (index + quota) & mask; + + while (index != stop) { + if (test_bit(index, table->used_bitmap) && + table->spec[index].priority == EFX_FILTER_PRI_HINT && + rps_may_expire_flow(efx->net_dev, + table->spec[index].dmaq_id, + state->rps_flow_id[index], index)) { + netif_info(efx, rx_status, efx->net_dev, + "expiring filter %d [flow %u]\n", + index, state->rps_flow_id[index]); + efx_filter_table_clear_entry(efx, table, index); + } + index = (index + 1) & mask; + } + + state->rps_expire_index = stop; + if (table->used == 0) + efx_filter_table_reset_search_depth(table); + + spin_unlock_bh(&state->lock); + return true; +} + +#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h index 6da4ae20a039..d9d8c2ef1074 100644 --- a/drivers/net/sfc/io.h +++ b/drivers/net/sfc/io.h @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -48,9 +48,9 @@ * replacing the low 96 bits with zero does not affect functionality. * - If the host writes to the last dword address of such a register * (i.e. the high 32 bits) the underlying register will always be - * written. If the collector does not hold values for the low 96 - * bits of the register, they will be written as zero. Writing to - * the last qword does not have this effect and must not be done. + * written. If the collector and the current write together do not + * provide values for all 128 bits of the register, the low 96 bits + * will be written as zero. * - If the host writes to the address of any other part of such a * register while the collector already holds values for some other * register, the write is discarded and the collector maintains its @@ -103,6 +103,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, _efx_writed(efx, value->u32[2], reg + 8); _efx_writed(efx, value->u32[3], reg + 12); #endif + wmb(); mmiowb(); spin_unlock_irqrestore(&efx->biu_lock, flags); } @@ -125,6 +126,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, __raw_writel((__force u32)value->u32[0], membase + addr); __raw_writel((__force u32)value->u32[1], membase + addr + 4); #endif + wmb(); mmiowb(); spin_unlock_irqrestore(&efx->biu_lock, flags); } @@ -139,6 +141,7 @@ static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, /* No lock required */ _efx_writed(efx, value->u32[0], reg); + wmb(); } /* Read a 128-bit CSR, locking as appropriate. 
*/ @@ -237,12 +240,14 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, #ifdef EFX_USE_QWORD_IO _efx_writeq(efx, value->u64[0], reg + 0); + _efx_writeq(efx, value->u64[1], reg + 8); #else _efx_writed(efx, value->u32[0], reg + 0); _efx_writed(efx, value->u32[1], reg + 4); -#endif _efx_writed(efx, value->u32[2], reg + 8); _efx_writed(efx, value->u32[3], reg + 12); +#endif + wmb(); } #define efx_writeo_page(efx, value, reg, page) \ _efx_writeo_page(efx, value, \ diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index b716e827b291..5e118f0d2479 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2008-2009 Solarflare Communications Inc. + * Copyright 2008-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -94,14 +94,15 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, efx_writed(efx, &hdr, pdu); - for (i = 0; i < inlen; i += 4) + for (i = 0; i < inlen; i += 4) { _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); - - /* Ensure the payload is written out before the header */ - wmb(); + /* use wmb() within loop to inhibit write combining */ + wmb(); + } /* ring the doorbell with a distinctive value */ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); + wmb(); } static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) @@ -602,7 +603,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, ************************************************************************** */ -int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) +void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) { u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; size_t outlength; @@ -616,29 +617,20 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) if (rc) goto fail; - if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) { - *version = 0; - *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); - return 0; - } - if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { rc = -EIO; goto fail; } ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); - *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | - ((u64)le16_to_cpu(ver_words[1]) << 32) | - ((u64)le16_to_cpu(ver_words[2]) << 16) | - le16_to_cpu(ver_words[3])); - *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); - - return 0; + snprintf(buf, len, "%u.%u.%u.%u", + le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), + le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); + return; fail: netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); - return rc; + buf[0] = 0; } int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h index c792f1d65e48..aced2a7856fc 100644 --- a/drivers/net/sfc/mcdi.h +++ b/drivers/net/sfc/mcdi.h @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2008-2009 Solarflare Communications Inc. + * Copyright 2008-2010 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -93,7 +93,7 @@ extern void efx_mcdi_process_event(struct efx_channel *channel, #define MCDI_EVENT_FIELD(_ev, _field) \ EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) -extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build); +extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached_out); extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c index f88f4bf986ff..33f7294edb47 100644 --- a/drivers/net/sfc/mcdi_mac.c +++ b/drivers/net/sfc/mcdi_mac.c @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2009 Solarflare Communications Inc. + * Copyright 2009-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h index 90359e644006..b86a15f221ad 100644 --- a/drivers/net/sfc/mcdi_pcol.h +++ b/drivers/net/sfc/mcdi_pcol.h @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2009 Solarflare Communications Inc. + * Copyright 2009-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c index 0e97eed663c6..ec3f740f5465 100644 --- a/drivers/net/sfc/mcdi_phy.c +++ b/drivers/net/sfc/mcdi_phy.c @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2009 Solarflare Communications Inc. + * Copyright 2009-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index 56b0266b441f..19e68c26d103 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -51,13 +51,10 @@ int efx_mdio_reset_mmd(struct efx_nic *port, int mmd, return spins ? spins : -ETIMEDOUT; } -static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) +static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd) { int status; - if (LOOPBACK_INTERNAL(efx)) - return 0; - if (mmd != MDIO_MMD_AN) { /* Read MMD STATUS2 to check it is responding. 
*/ status = efx_mdio_read(efx, mmd, MDIO_STAT2); @@ -68,20 +65,6 @@ static int efx_mdio_check_mmd(struct efx_nic *efx, int mmd, int fault_fatal) } } - /* Read MMD STATUS 1 to check for fault. */ - status = efx_mdio_read(efx, mmd, MDIO_STAT1); - if (status & MDIO_STAT1_FAULT) { - if (fault_fatal) { - netif_err(efx, hw, efx->net_dev, - "PHY MMD %d reporting fatal" - " fault: status %x\n", mmd, status); - return -EIO; - } else { - netif_dbg(efx, hw, efx->net_dev, - "PHY MMD %d reporting status" - " %x (expected)\n", mmd, status); - } - } return 0; } @@ -130,8 +113,7 @@ int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask) return rc; } -int efx_mdio_check_mmds(struct efx_nic *efx, - unsigned int mmd_mask, unsigned int fatal_mask) +int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask) { int mmd = 0, probe_mmd, devs1, devs2; u32 devices; @@ -161,13 +143,9 @@ int efx_mdio_check_mmds(struct efx_nic *efx, /* Check all required MMDs are responding and happy. */ while (mmd_mask) { - if (mmd_mask & 1) { - int fault_fatal = fatal_mask & 1; - if (efx_mdio_check_mmd(efx, mmd, fault_fatal)) - return -EIO; - } + if ((mmd_mask & 1) && efx_mdio_check_mmd(efx, mmd)) + return -EIO; mmd_mask = mmd_mask >> 1; - fatal_mask = fatal_mask >> 1; mmd++; } @@ -337,7 +315,7 @@ int efx_mdio_test_alive(struct efx_nic *efx) "no MDIO PHY present with ID %d\n", efx->mdio.prtad); rc = -EINVAL; } else { - rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0); + rc = efx_mdio_check_mmds(efx, efx->mdio.mmds); } mutex_unlock(&efx->mac_lock); diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 75791d3d4963..df0703940c83 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -68,8 +68,7 @@ extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime); /* As efx_mdio_check_mmd but for multiple MMDs */ -int efx_mdio_check_mmds(struct efx_nic *efx, - unsigned int mmd_mask, unsigned int fatal_mask); +int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask); /* Check the link status of specified mmds in bit mask */ extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask); diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c index d38627448c22..e646bfce2d84 100644 --- a/drivers/net/sfc/mtd.c +++ b/drivers/net/sfc/mtd.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 28df8665256a..215d5c51bfa0 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2005-2009 Solarflare Communications Inc. + * Copyright 2005-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -41,7 +41,7 @@ * **************************************************************************/ -#define EFX_DRIVER_VERSION "3.0" +#define EFX_DRIVER_VERSION "3.1" #ifdef EFX_ENABLE_DEBUG #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) @@ -63,10 +63,12 @@ /* Checksum generation is a per-queue option in hardware, so each * queue visible to the networking core is backed by two hardware TX * queues. */ -#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS -#define EFX_TXQ_TYPE_OFFLOAD 1 -#define EFX_TXQ_TYPES 2 -#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) +#define EFX_MAX_TX_TC 2 +#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS) +#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */ +#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */ +#define EFX_TXQ_TYPES 4 +#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) /** * struct efx_special_buffer - An Efx special buffer @@ -140,6 +142,7 @@ struct efx_tx_buffer { * @buffer: The software buffer ring * @txd: The hardware descriptor ring * @ptr_mask: The size of the ring minus 1. + * @initialised: Has hardware queue been initialised? * @flushed: Used when handling queue flushing * @read_count: Current read pointer. * This is the number of buffers that have been removed from both rings. @@ -182,6 +185,7 @@ struct efx_tx_queue { struct efx_tx_buffer *buffer; struct efx_special_buffer txd; unsigned int ptr_mask; + bool initialised; enum efx_flush_state flushed; /* Members used mainly on the completion path */ @@ -210,15 +214,17 @@ struct efx_tx_queue { * If both this and page are %NULL, the buffer slot is currently free. * @page: The associated page buffer, if any. * If both this and skb are %NULL, the buffer slot is currently free. - * @data: Pointer to ethernet header * @len: Buffer length, in bytes. + * @is_page: Indicates if @page is valid. If false, @skb is valid. 
*/ struct efx_rx_buffer { dma_addr_t dma_addr; - struct sk_buff *skb; - struct page *page; - char *data; + union { + struct sk_buff *skb; + struct page *page; + } u; unsigned int len; + bool is_page; }; /** @@ -358,6 +364,9 @@ struct efx_channel { unsigned int irq_count; unsigned int irq_mod_score; +#ifdef CONFIG_RFS_ACCEL + unsigned int rfs_filters_added; +#endif int rx_alloc_level; int rx_alloc_push_pages; @@ -377,7 +386,7 @@ struct efx_channel { bool rx_pkt_csummed; struct efx_rx_queue rx_queue; - struct efx_tx_queue tx_queue[2]; + struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; }; enum efx_led_mode { @@ -906,7 +915,7 @@ struct efx_nic_type { unsigned int phys_addr_channels; unsigned int tx_dc_base; unsigned int rx_dc_base; - unsigned long offload_features; + u32 offload_features; u32 reset_world_flags; }; @@ -938,18 +947,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; } +static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) +{ + return channel->channel - channel->efx->tx_channel_offset < + channel->efx->n_tx_channels; +} + static inline struct efx_tx_queue * efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) { - struct efx_tx_queue *tx_queue = channel->tx_queue; - EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES); - return tx_queue->channel ? tx_queue + type : NULL; + EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) || + type >= EFX_TXQ_TYPES); + return &channel->tx_queue[type]; +} + +static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) +{ + return !(tx_queue->efx->net_dev->num_tc < 2 && + tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI); } /* Iterate over all TX queues belonging to a channel */ #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ - for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \ - _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ + if (!efx_channel_has_tx_queues(_channel)) \ + ; \ + else \ + for (_tx_queue = (_channel)->tx_queue; \ + _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \ + efx_tx_queue_used(_tx_queue); \ + _tx_queue++) + +/* Iterate over all possible TX queues belonging to a channel */ +#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ + for (_tx_queue = (_channel)->tx_queue; \ + _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ _tx_queue++) static inline struct efx_rx_queue * @@ -959,18 +990,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index) return &efx->channel[index]->rx_queue; } +static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) +{ + return channel->channel < channel->efx->n_rx_channels; +} + static inline struct efx_rx_queue * efx_channel_get_rx_queue(struct efx_channel *channel) { - return channel->channel < channel->efx->n_rx_channels ? 
- &channel->rx_queue : NULL; + EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel)); + return &channel->rx_queue; } /* Iterate over all RX queues belonging to a channel */ #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ - for (_rx_queue = efx_channel_get_rx_queue(channel); \ - _rx_queue; \ - _rx_queue = NULL) + if (!efx_channel_has_rx_queue(_channel)) \ + ; \ + else \ + for (_rx_queue = &(_channel)->rx_queue; \ + _rx_queue; \ + _rx_queue = NULL) static inline struct efx_channel * efx_rx_queue_channel(struct efx_rx_queue *rx_queue) diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index da386599ab68..e8396614daf3 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -41,26 +41,6 @@ #define RX_DC_ENTRIES 64 #define RX_DC_ENTRIES_ORDER 3 -/* RX FIFO XOFF watermark - * - * When the amount of the RX FIFO increases used increases past this - * watermark send XOFF. Only used if RX flow control is enabled (ethtool -A) - * This also has an effect on RX/TX arbitration - */ -int efx_nic_rx_xoff_thresh = -1; -module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644); -MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold"); - -/* RX FIFO XON watermark - * - * When the amount of the RX FIFO used decreases below this - * watermark send XON. Only used if TX flow control is enabled (ethtool -A) - * This also has an effect on RX/TX arbitration - */ -int efx_nic_rx_xon_thresh = -1; -module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644); -MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); - /* If EFX_MAX_INT_ERRORS internal errors occur within * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and * disable it. 
@@ -445,8 +425,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) void efx_nic_init_tx(struct efx_tx_queue *tx_queue) { - efx_oword_t tx_desc_ptr; struct efx_nic *efx = tx_queue->efx; + efx_oword_t reg; tx_queue->flushed = FLUSH_NONE; @@ -454,7 +434,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue) efx_init_special_buffer(efx, &tx_queue->txd); /* Push TX descriptor ring to card */ - EFX_POPULATE_OWORD_10(tx_desc_ptr, + EFX_POPULATE_OWORD_10(reg, FRF_AZ_TX_DESCQ_EN, 1, FRF_AZ_TX_ISCSI_DDIG_EN, 0, FRF_AZ_TX_ISCSI_HDIG_EN, 0, @@ -470,17 +450,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue) if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; - EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); - EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, + EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); + EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, !csum); } - efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, + efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, tx_queue->queue); if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { - efx_oword_t reg; - /* Only 128 bits in this register */ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); @@ -491,6 +469,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue) set_bit_le(tx_queue->queue, (void *)®); efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); } + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + EFX_POPULATE_OWORD_1(reg, + FRF_BZ_TX_PACE, + (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? + FFE_BZ_TX_PACE_OFF : + FFE_BZ_TX_PACE_RESERVED); + efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, + tx_queue->queue); + } } static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) @@ -1238,8 +1226,10 @@ int efx_nic_flush_queues(struct efx_nic *efx) /* Flush all tx queues in parallel */ efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) - efx_flush_tx_queue(tx_queue); + efx_for_each_possible_channel_tx_queue(tx_queue, channel) { + if (tx_queue->initialised) + efx_flush_tx_queue(tx_queue); + } } /* The hardware supports four concurrent rx flushes, each of which may @@ -1262,8 +1252,9 @@ int efx_nic_flush_queues(struct efx_nic *efx) ++rx_pending; } } - efx_for_each_channel_tx_queue(tx_queue, channel) { - if (tx_queue->flushed != FLUSH_DONE) + efx_for_each_possible_channel_tx_queue(tx_queue, channel) { + if (tx_queue->initialised && + tx_queue->flushed != FLUSH_DONE) ++tx_pending; } } @@ -1278,8 +1269,9 @@ int efx_nic_flush_queues(struct efx_nic *efx) /* Mark the queues as all flushed. We're going to return failure * leading to a reset, or fake up success anyway */ efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - if (tx_queue->flushed != FLUSH_DONE) + efx_for_each_possible_channel_tx_queue(tx_queue, channel) { + if (tx_queue->initialised && + tx_queue->flushed != FLUSH_DONE) netif_err(efx, hw, efx->net_dev, "tx queue %d flush command timed out\n", tx_queue->queue); @@ -1682,6 +1674,19 @@ void efx_nic_init_common(struct efx_nic *efx) if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); + + if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { + EFX_POPULATE_OWORD_4(temp, + /* Default values */ + FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, + FRF_BZ_TX_PACE_SB_AF, 0xb, + FRF_BZ_TX_PACE_FB_BASE, 0, + /* Allow large pace values in the + * fast bin. 
*/ + FRF_BZ_TX_PACE_BIN_TH, + FFE_BZ_TX_PACE_RESERVED); + efx_writeo(efx, &temp, FR_BZ_TX_PACE); + } } /* Register dump */ diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index eb0586925b51..d9de1b647d41 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -142,20 +142,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) /** * struct siena_nic_data - Siena NIC state - * @fw_version: Management controller firmware version - * @fw_build: Firmware build number * @mcdi: Management-Controller-to-Driver Interface * @wol_filter_id: Wake-on-LAN packet filter id */ struct siena_nic_data { - u64 fw_version; - u32 fw_build; struct efx_mcdi_iface mcdi; int wol_filter_id; }; -extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len); - extern struct efx_nic_type falcon_a1_nic_type; extern struct efx_nic_type falcon_b0_nic_type; extern struct efx_nic_type siena_a0_nic_type; @@ -194,7 +188,6 @@ extern void efx_nic_eventq_read_ack(struct efx_channel *channel); /* MAC/PHY */ extern void falcon_drain_tx_fifo(struct efx_nic *efx); extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); -extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh; /* Interrupts and test events */ extern int efx_nic_init_interrupt(struct efx_nic *efx); diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h index 1dab609757fb..b3b79472421e 100644 --- a/drivers/net/sfc/phy.h +++ b/drivers/net/sfc/phy.h @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2007-2009 Solarflare Communications Inc. + * Copyright 2007-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c index ea3ae0089315..55f90924247e 100644 --- a/drivers/net/sfc/qt202x_phy.c +++ b/drivers/net/sfc/qt202x_phy.c @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h index 96430ed81c36..cc2c86b76a7b 100644 --- a/drivers/net/sfc/regs.h +++ b/drivers/net/sfc/regs.h @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -2907,6 +2907,12 @@ #define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44 #define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16 +/* TX_PACE_TBL */ +/* Values >20 are documented as reserved, but will result in a queue going + * into the fast bin with a pace value of zero. */ +#define FFE_BZ_TX_PACE_OFF 0 +#define FFE_BZ_TX_PACE_RESERVED 21 + /* DRIVER_EV */ /* Sub-fields of an RX flush completion event */ #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12 diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 3925fd621177..c0fdb59030fb 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2005-2009 Solarflare Communications Inc. + * Copyright 2005-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95; */ #define EFX_RXD_HEAD_ROOM 2 -static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) +/* Offset of ethernet header within page */ +static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx, + struct efx_rx_buffer *buf) { /* Offset is always within one page, so we don't need to consider * the page order. */ - return (__force unsigned long) buf->data & (PAGE_SIZE - 1); + return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) + + efx->type->rx_buffer_hash_size); } static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) { return PAGE_SIZE << efx->rx_buffer_order; } -static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf) +static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) { + if (buf->is_page) + return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); + else + return ((u8 *)buf->u.skb->data + + efx->type->rx_buffer_hash_size); +} + +static inline u32 efx_rx_buf_hash(const u8 *eh) +{ + /* The ethernet header is always directly after any hash. 
*/ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0 - return __le32_to_cpup((const __le32 *)(buf->data - 4)); + return __le32_to_cpup((const __le32 *)(eh - 4)); #else - const u8 *data = (const u8 *)(buf->data - 4); + const u8 *data = eh - 4; return ((u32)data[0] | (u32)data[1] << 8 | (u32)data[2] << 16 | @@ -129,6 +142,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) struct efx_nic *efx = rx_queue->efx; struct net_device *net_dev = efx->net_dev; struct efx_rx_buffer *rx_buf; + struct sk_buff *skb; int skb_len = efx->rx_buffer_len; unsigned index, count; @@ -136,24 +150,23 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); - if (unlikely(!rx_buf->skb)) + rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len); + if (unlikely(!skb)) return -ENOMEM; - rx_buf->page = NULL; /* Adjust the SKB for padding and checksum */ - skb_reserve(rx_buf->skb, NET_IP_ALIGN); + skb_reserve(skb, NET_IP_ALIGN); rx_buf->len = skb_len - NET_IP_ALIGN; - rx_buf->data = (char *)rx_buf->skb->data; - rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; + rx_buf->is_page = false; + skb->ip_summed = CHECKSUM_UNNECESSARY; rx_buf->dma_addr = pci_map_single(efx->pci_dev, - rx_buf->data, rx_buf->len, + skb->data, rx_buf->len, PCI_DMA_FROMDEVICE); if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) { - dev_kfree_skb_any(rx_buf->skb); - rx_buf->skb = NULL; + dev_kfree_skb_any(skb); + rx_buf->u.skb = NULL; return -EIO; } @@ -211,10 +224,9 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; - rx_buf->skb = NULL; - rx_buf->page = page; - rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN; + rx_buf->u.page = page; rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; + rx_buf->is_page = true; ++rx_queue->added_count; ++rx_queue->alloc_page_count; ++state->refcnt; @@ -235,19 +247,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) static void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf) { - if (rx_buf->page) { + if (rx_buf->is_page && rx_buf->u.page) { struct efx_rx_page_state *state; - EFX_BUG_ON_PARANOID(rx_buf->skb); - - state = page_address(rx_buf->page); + state = page_address(rx_buf->u.page); if (--state->refcnt == 0) { pci_unmap_page(efx->pci_dev, state->dma_addr, efx_rx_buf_size(efx), PCI_DMA_FROMDEVICE); } - } else if (likely(rx_buf->skb)) { + } else if (!rx_buf->is_page && rx_buf->u.skb) { pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, rx_buf->len, PCI_DMA_FROMDEVICE); } @@ -256,12 +266,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, static void efx_free_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf) { - if (rx_buf->page) { - __free_pages(rx_buf->page, efx->rx_buffer_order); - rx_buf->page = NULL; - } else if (likely(rx_buf->skb)) { - dev_kfree_skb_any(rx_buf->skb); - rx_buf->skb = NULL; + if (rx_buf->is_page && rx_buf->u.page) { + __free_pages(rx_buf->u.page, efx->rx_buffer_order); + rx_buf->u.page = NULL; + } else if (!rx_buf->is_page && rx_buf->u.skb) { + dev_kfree_skb_any(rx_buf->u.skb); + rx_buf->u.skb = NULL; } } @@ -277,7 +287,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, struct 
efx_rx_buffer *rx_buf) { - struct efx_rx_page_state *state = page_address(rx_buf->page); + struct efx_rx_page_state *state = page_address(rx_buf->u.page); struct efx_rx_buffer *new_buf; unsigned fill_level, index; @@ -292,16 +302,14 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, } ++state->refcnt; - get_page(rx_buf->page); + get_page(rx_buf->u.page); index = rx_queue->added_count & rx_queue->ptr_mask; new_buf = efx_rx_buffer(rx_queue, index); new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); - new_buf->skb = NULL; - new_buf->page = rx_buf->page; - new_buf->data = (void *) - ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1)); + new_buf->u.page = rx_buf->u.page; new_buf->len = rx_buf->len; + new_buf->is_page = true; ++rx_queue->added_count; } @@ -315,16 +323,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel, struct efx_rx_buffer *new_buf; unsigned index; - if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && - page_count(rx_buf->page) == 1) + if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && + page_count(rx_buf->u.page) == 1) efx_resurrect_rx_buffer(rx_queue, rx_buf); index = rx_queue->added_count & rx_queue->ptr_mask; new_buf = efx_rx_buffer(rx_queue, index); memcpy(new_buf, rx_buf, sizeof(*new_buf)); - rx_buf->page = NULL; - rx_buf->skb = NULL; + rx_buf->u.page = NULL; ++rx_queue->added_count; } @@ -428,7 +435,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, * data at the end of the skb will be trashed. So * we have no choice but to leak the fragment. */ - *leak_packet = (rx_buf->skb != NULL); + *leak_packet = !rx_buf->is_page; efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); } else { if (net_ratelimit()) @@ -448,19 +455,18 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, */ static void efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, - bool checksummed) + const u8 *eh, bool checksummed) { struct napi_struct *napi = &channel->napi_str; gro_result_t gro_result; /* Pass the skb/page into the GRO engine */ - if (rx_buf->page) { + if (rx_buf->is_page) { struct efx_nic *efx = channel->efx; - struct page *page = rx_buf->page; + struct page *page = rx_buf->u.page; struct sk_buff *skb; - EFX_BUG_ON_PARANOID(rx_buf->skb); - rx_buf->page = NULL; + rx_buf->u.page = NULL; skb = napi_get_frags(napi); if (!skb) { @@ -469,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel, } if (efx->net_dev->features & NETIF_F_RXHASH) - skb->rxhash = efx_rx_buf_hash(rx_buf); + skb->rxhash = efx_rx_buf_hash(eh); skb_shinfo(skb)->frags[0].page = page; skb_shinfo(skb)->frags[0].page_offset = - efx_rx_buf_offset(rx_buf); + efx_rx_buf_offset(efx, rx_buf); skb_shinfo(skb)->frags[0].size = rx_buf->len; skb_shinfo(skb)->nr_frags = 1; @@ -487,11 +493,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel, gro_result = napi_gro_frags(napi); } else { - struct sk_buff *skb = rx_buf->skb; + struct sk_buff *skb = rx_buf->u.skb; - EFX_BUG_ON_PARANOID(!skb); EFX_BUG_ON_PARANOID(!checksummed); - rx_buf->skb = NULL; + rx_buf->u.skb = NULL; gro_result = napi_gro_receive(napi, skb); } @@ -513,9 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, bool leak_packet = false; rx_buf = efx_rx_buffer(rx_queue, index); - EFX_BUG_ON_PARANOID(!rx_buf->data); - EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page); - EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page)); /* This allows the refill path to post another buffer. 
* EFX_RXD_HEAD_ROOM ensures that the slot we are using @@ -554,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, /* Prefetch nice and early so data will (hopefully) be in cache by * the time we look at it. */ - prefetch(rx_buf->data); + prefetch(efx_rx_buf_eh(efx, rx_buf)); /* Pipeline receives so that we give time for packet headers to be * prefetched into cache. */ - rx_buf->len = len; + rx_buf->len = len - efx->type->rx_buffer_hash_size; out: if (channel->rx_pkt) __efx_rx_packet(channel, @@ -574,45 +576,43 @@ void __efx_rx_packet(struct efx_channel *channel, { struct efx_nic *efx = channel->efx; struct sk_buff *skb; - - rx_buf->data += efx->type->rx_buffer_hash_size; - rx_buf->len -= efx->type->rx_buffer_hash_size; + u8 *eh = efx_rx_buf_eh(efx, rx_buf); /* If we're in loopback test, then pass the packet directly to the * loopback layer, and free the rx_buf here */ if (unlikely(efx->loopback_selftest)) { - efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); + efx_loopback_rx_packet(efx, eh, rx_buf->len); efx_free_rx_buffer(efx, rx_buf); return; } - if (rx_buf->skb) { - prefetch(skb_shinfo(rx_buf->skb)); + if (!rx_buf->is_page) { + skb = rx_buf->u.skb; - skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size); - skb_put(rx_buf->skb, rx_buf->len); + prefetch(skb_shinfo(skb)); + + skb_reserve(skb, efx->type->rx_buffer_hash_size); + skb_put(skb, rx_buf->len); if (efx->net_dev->features & NETIF_F_RXHASH) - rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf); + skb->rxhash = efx_rx_buf_hash(eh); /* Move past the ethernet header. rx_buf->data still points * at the ethernet header */ - rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, - efx->net_dev); + skb->protocol = eth_type_trans(skb, efx->net_dev); - skb_record_rx_queue(rx_buf->skb, channel->channel); + skb_record_rx_queue(skb, channel->channel); } - if (likely(checksummed || rx_buf->page)) { - efx_rx_packet_gro(channel, rx_buf, checksummed); + if (likely(checksummed || rx_buf->is_page)) { + efx_rx_packet_gro(channel, rx_buf, eh, checksummed); return; } /* We now own the SKB */ - skb = rx_buf->skb; - rx_buf->skb = NULL; - EFX_BUG_ON_PARANOID(!skb); + skb = rx_buf->u.skb; + rx_buf->u.skb = NULL; /* Set the SKB flags */ skb_checksum_none_assert(skb); diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index 0ebfb99f1299..a0f49b348d62 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, goto out; } - /* Test both types of TX queue */ + /* Test all enabled types of TX queue */ efx_for_each_channel_tx_queue(tx_queue, channel) { state->offload_csum = (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD); diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h index aed495a4dad7..dba5456e70f3 100644 --- a/drivers/net/sfc/selftest.h +++ b/drivers/net/sfc/selftest.h @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2008 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index bf8456176443..e4dd8986b1fe 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -227,13 +227,6 @@ static int siena_probe_nic(struct efx_nic *efx) if (rc) goto fail1; - rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build); - if (rc) { - netif_err(efx, probe, efx->net_dev, - "Failed to read MCPU firmware version - rc %d\n", rc); - goto fail1; /* MCPU absent? */ - } - /* Let the BMC know that the driver is now in charge of link and * filter settings. 
We must do this before we reset the NIC */ rc = efx_mcdi_drv_attach(efx, true, &already_attached); @@ -348,11 +341,6 @@ static int siena_init_nic(struct efx_nic *efx) FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); - if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) - /* No MCDI operation has been defined to set thresholds */ - netif_err(efx, hw, efx->net_dev, - "ignoring RX flow control thresholds\n"); - /* Enable event logging */ rc = efx_mcdi_log_ctrl(efx, true, false, 0); if (rc) @@ -514,16 +502,6 @@ static void siena_stop_nic_stats(struct efx_nic *efx) efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); } -void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len) -{ - struct siena_nic_data *nic_data = efx->nic_data; - snprintf(buf, len, "%u.%u.%u.%u", - (unsigned int)(nic_data->fw_version >> 48), - (unsigned int)(nic_data->fw_version >> 32 & 0xffff), - (unsigned int)(nic_data->fw_version >> 16 & 0xffff), - (unsigned int)(nic_data->fw_version & 0xffff)); -} - /************************************************************************** * * Wake on LAN diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h index 879b7f6bde3d..71f2e3ebe1c7 100644 --- a/drivers/net/sfc/spi.h +++ b/drivers/net/sfc/spi.h @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005 Fen Systems Ltd. - * Copyright 2006 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index f102912eba91..efdceb35aaae 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2007-2009 Solarflare Communications Inc. + * Copyright 2007-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -196,7 +196,7 @@ static int tenxpress_phy_init(struct efx_nic *efx) if (rc < 0) return rc; - rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); + rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS); if (rc < 0) return rc; } diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 2f5e9da657bf..139801908217 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c @@ -1,7 +1,7 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. - * Copyright 2005-2009 Solarflare Communications Inc. + * Copyright 2005-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, { struct efx_nic *efx = netdev_priv(net_dev); struct efx_tx_queue *tx_queue; + unsigned index, type; if (unlikely(efx->port_inhibited)) return NETDEV_TX_BUSY; - tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb), - skb->ip_summed == CHECKSUM_PARTIAL ? 
- EFX_TXQ_TYPE_OFFLOAD : 0); + index = skb_get_queue_mapping(skb); + type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; + if (index >= efx->n_tx_channels) { + index -= efx->n_tx_channels; + type |= EFX_TXQ_TYPE_HIGHPRI; + } + tx_queue = efx_get_tx_queue(efx, index, type); return efx_enqueue_skb(tx_queue, skb); } +void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) +{ + struct efx_nic *efx = tx_queue->efx; + + /* Must be inverse of queue lookup in efx_hard_start_xmit() */ + tx_queue->core_txq = + netdev_get_tx_queue(efx->net_dev, + tx_queue->queue / EFX_TXQ_TYPES + + ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? + efx->n_tx_channels : 0)); +} + +int efx_setup_tc(struct net_device *net_dev, u8 num_tc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_channel *channel; + struct efx_tx_queue *tx_queue; + unsigned tc; + int rc; + + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) + return -EINVAL; + + if (num_tc == net_dev->num_tc) + return 0; + + for (tc = 0; tc < num_tc; tc++) { + net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; + net_dev->tc_to_txq[tc].count = efx->n_tx_channels; + } + + if (num_tc > net_dev->num_tc) { + /* Initialise high-priority queues as necessary */ + efx_for_each_channel(channel, efx) { + efx_for_each_possible_channel_tx_queue(tx_queue, + channel) { + if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) + continue; + if (!tx_queue->buffer) { + rc = efx_probe_tx_queue(tx_queue); + if (rc) + return rc; + } + if (!tx_queue->initialised) + efx_init_tx_queue(tx_queue); + efx_init_tx_queue_core_txq(tx_queue); + } + } + } else { + /* Reduce number of classes before number of queues */ + net_dev->num_tc = num_tc; + } + + rc = netif_set_real_num_tx_queues(net_dev, + max_t(int, num_tc, 1) * + efx->n_tx_channels); + if (rc) + return rc; + + /* Do not destroy high-priority queues when they become + * unused. We would have to flush them first, and it is + * fairly difficult to flush a subset of TX queues. Leave + * it to efx_fini_channels(). + */ + + net_dev->num_tc = num_tc; + return 0; +} + void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned fill_level; @@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) /* Set up TX descriptor ring */ efx_nic_init_tx(tx_queue); + + tx_queue->initialised = true; } void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) @@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) { + if (!tx_queue->initialised) + return; + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "shutting down TX queue %d\n", tx_queue->queue); + tx_queue->initialised = false; + /* Flush TX queue, remove descriptor ring */ efx_nic_fini_tx(tx_queue); @@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) { + if (!tx_queue->buffer) + return; + netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "destroying TX queue %d\n", tx_queue->queue); efx_nic_remove_tx(tx_queue); diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c index 351794a79215..d9886addcc99 100644 --- a/drivers/net/sfc/txc43128_phy.c +++ b/drivers/net/sfc/txc43128_phy.c @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2006-2010 Solarflare Communications Inc. 
+ * Copyright 2006-2011 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published @@ -193,7 +193,7 @@ static int txc_reset_phy(struct efx_nic *efx) goto fail; /* Check that all the MMDs we expect are present and responding. */ - rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS, 0); + rc = efx_mdio_check_mmds(efx, TXC_REQUIRED_DEVS); if (rc < 0) goto fail; diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index e0d63083c3a8..e4dd3a7f304b 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h @@ -1,6 +1,6 @@ /**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards - * Copyright 2006-2009 Solarflare Communications Inc. + * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 819c1750e2ab..e9e7a530552c 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -32,35 +32,40 @@ #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/slab.h> +#include <linux/ethtool.h> #include <asm/cacheflush.h> #include "sh_eth.h" +#define SH_ETH_DEF_MSG_ENABLE \ + (NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_RX_ERR| \ + NETIF_MSG_TX_ERR) + /* There is CPU dependent code */ #if defined(CONFIG_CPU_SUBTYPE_SH7724) #define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; if (mdp->duplex) /* Full */ - writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); else /* Half */ - writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); } static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; switch (mdp->speed) { case 10: /* 10BASE */ - writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); break; case 100:/* 100BASE */ - writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); break; default: break; @@ -89,29 +94,28 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; #elif defined(CONFIG_CPU_SUBTYPE_SH7757) -#define SH_ETH_RESET_DEFAULT 1 +#define SH_ETH_HAS_BOTH_MODULES 1 +#define SH_ETH_HAS_TSU 1 static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; if (mdp->duplex) /* Full */ - writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); else /* Half */ - writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); } static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; switch (mdp->speed) { case 10: /* 10BASE */ - writel(0, ioaddr + RTRATE); + sh_eth_write(ndev, 0, RTRATE); break; case 100:/* 100BASE */ - writel(1, ioaddr + RTRATE); + 
sh_eth_write(ndev, 1, RTRATE); break; default: break; @@ -138,24 +142,154 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .no_ade = 1, }; +#define SH_GIGA_ETH_BASE 0xfee00000 +#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) +#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) +static void sh_eth_chip_reset_giga(struct net_device *ndev) +{ + int i; + unsigned long mahr[2], malr[2]; + + /* save MAHR and MALR */ + for (i = 0; i < 2; i++) { + malr[i] = readl(GIGA_MALR(i)); + mahr[i] = readl(GIGA_MAHR(i)); + } + + /* reset device */ + writel(ARSTR_ARSTR, SH_GIGA_ETH_BASE + 0x1800); + mdelay(1); + + /* restore MAHR and MALR */ + for (i = 0; i < 2; i++) { + writel(malr[i], GIGA_MALR(i)); + writel(mahr[i], GIGA_MAHR(i)); + } +} + +static int sh_eth_is_gether(struct sh_eth_private *mdp); +static void sh_eth_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int cnt = 100; + + if (sh_eth_is_gether(mdp)) { + sh_eth_write(ndev, 0x03, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, + EDMR); + while (cnt > 0) { + if (!(sh_eth_read(ndev, EDMR) & 0x3)) + break; + mdelay(1); + cnt--; + } + if (cnt < 0) + printk(KERN_ERR "Device reset fail\n"); + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); + } else { + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, + EDMR); + mdelay(3); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, + EDMR); + } +} + +static void sh_eth_set_duplex_giga(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + if (mdp->duplex) /* Full */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); + else /* Half */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); +} + +static void sh_eth_set_rate_giga(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + switch (mdp->speed) { + case 10: /* 10BASE */ + sh_eth_write(ndev, 0x00000000, GECMR); + break; + case 100:/* 100BASE */ + sh_eth_write(ndev, 0x00000010, GECMR); + break; + case 1000: /* 1000BASE */ + sh_eth_write(ndev, 0x00000020, GECMR); + break; + default: + break; + } +} + +/* SH7757(GETHERC) */ +static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { + .chip_reset = sh_eth_chip_reset_giga, + .set_duplex = sh_eth_set_duplex_giga, + .set_rate = sh_eth_set_rate_giga, + + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, + .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ + EESR_TFE, + .fdr_value = 0x0000072f, + .rmcr_value = 0x00000001, + + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .rpadir = 1, + .rpadir_value = 2 << 16, + .no_trimd = 1, + .no_ade = 1, +}; + +static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) +{ + if (sh_eth_is_gether(mdp)) + return &sh_eth_my_cpu_data_giga; + else + return &sh_eth_my_cpu_data; +} + #elif defined(CONFIG_CPU_SUBTYPE_SH7763) #define SH_ETH_HAS_TSU 1 static void sh_eth_chip_reset(struct net_device *ndev) { + struct 
sh_eth_private *mdp = netdev_priv(ndev); + /* reset device */ - writel(ARSTR_ARSTR, ARSTR); + sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); mdelay(1); } static void sh_eth_reset(struct net_device *ndev) { - u32 ioaddr = ndev->base_addr; int cnt = 100; - writel(EDSR_ENALL, ioaddr + EDSR); - writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); while (cnt > 0) { - if (!(readl(ioaddr + EDMR) & 0x3)) + if (!(sh_eth_read(ndev, EDMR) & 0x3)) break; mdelay(1); cnt--; @@ -164,41 +298,39 @@ static void sh_eth_reset(struct net_device *ndev) printk(KERN_ERR "Device reset fail\n"); /* Table Init */ - writel(0x0, ioaddr + TDLAR); - writel(0x0, ioaddr + TDFAR); - writel(0x0, ioaddr + TDFXR); - writel(0x0, ioaddr + TDFFR); - writel(0x0, ioaddr + RDLAR); - writel(0x0, ioaddr + RDFAR); - writel(0x0, ioaddr + RDFXR); - writel(0x0, ioaddr + RDFFR); + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); } static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; if (mdp->duplex) /* Full */ - writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); else /* Half */ - writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); } static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; switch (mdp->speed) { case 10: /* 10BASE */ - writel(GECMR_10, ioaddr + GECMR); + sh_eth_write(ndev, GECMR_10, GECMR); break; case 100:/* 100BASE */ - writel(GECMR_100, ioaddr + GECMR); + sh_eth_write(ndev, GECMR_100, GECMR); break; case 1000: /* 1000BASE */ - writel(GECMR_1000, ioaddr + GECMR); + sh_eth_write(ndev, GECMR_1000, GECMR); break; default: break; @@ -229,6 +361,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .hw_swap = 1, .no_trimd = 1, .no_ade = 1, + .tsu = 1, }; #elif defined(CONFIG_CPU_SUBTYPE_SH7619) @@ -246,6 +379,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { #define SH_ETH_HAS_TSU 1 static struct sh_eth_cpu_data sh_eth_my_cpu_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, + .tsu = 1, }; #endif @@ -281,11 +415,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) /* Chip Reset */ static void sh_eth_reset(struct net_device *ndev) { - u32 ioaddr = ndev->base_addr; - - writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); mdelay(3); - writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); } #endif @@ -334,13 +466,11 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) */ static void update_mac_address(struct net_device *ndev) { - u32 ioaddr = ndev->base_addr; - - writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | - (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), - ioaddr + MAHR); - writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), - ioaddr + MALR); + sh_eth_write(ndev, + (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | + (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); + 
sh_eth_write(ndev, + (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); } /* @@ -353,21 +483,36 @@ static void update_mac_address(struct net_device *ndev) */ static void read_mac_address(struct net_device *ndev, unsigned char *mac) { - u32 ioaddr = ndev->base_addr; - if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { memcpy(ndev->dev_addr, mac, 6); } else { - ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24); - ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF; - ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF; - ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF); - ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF; - ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF); + ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); + ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; + ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; + ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); + ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; + ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); } } +static int sh_eth_is_gether(struct sh_eth_private *mdp) +{ + if (mdp->reg_offset == sh_eth_offset_gigabit) + return 1; + else + return 0; +} + +static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) +{ + if (sh_eth_is_gether(mdp)) + return EDTRR_TRNS_GETHER; + else + return EDTRR_TRNS_ETHER; +} + struct bb_info { + void (*set_gate)(unsigned long addr); struct mdiobb_ctrl ctrl; u32 addr; u32 mmd_msk;/* MMD */ @@ -398,6 +543,10 @@ static int bb_read(u32 addr, u32 msk) static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + if (bit) bb_set(bitbang->addr, bitbang->mmd_msk); else @@ -409,6 +558,9 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + if (bit) bb_set(bitbang->addr, bitbang->mdo_msk); else @@ -419,6 +571,10 @@ static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit) static int sh_get_mdio(struct mdiobb_ctrl *ctrl) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + return bb_read(bitbang->addr, bitbang->mdi_msk); } @@ -427,6 +583,9 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit) { struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl); + if (bitbang->set_gate) + bitbang->set_gate(bitbang->addr); + if (bit) bb_set(bitbang->addr, bitbang->mdc_msk); else @@ -470,7 +629,6 @@ static void sh_eth_ring_free(struct net_device *ndev) /* format skb and descriptor buffer */ static void sh_eth_ring_format(struct net_device *ndev) { - u32 ioaddr = ndev->base_addr; struct sh_eth_private *mdp = netdev_priv(ndev); int i; struct sk_buff *skb; @@ -506,10 +664,9 @@ static void sh_eth_ring_format(struct net_device *ndev) rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); /* Rx descriptor address set */ if (i == 0) { - writel(mdp->rx_desc_dma, ioaddr + RDLAR); -#if defined(CONFIG_CPU_SUBTYPE_SH7763) - writel(mdp->rx_desc_dma, ioaddr + RDFAR); -#endif + sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); + if (sh_eth_is_gether(mdp)) + sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); } } @@ -528,10 +685,9 @@ static void sh_eth_ring_format(struct net_device *ndev) txdesc->buffer_length = 0; if (i == 0) { /* Tx descriptor address set */ - writel(mdp->tx_desc_dma, ioaddr + 
TDLAR); -#if defined(CONFIG_CPU_SUBTYPE_SH7763) - writel(mdp->tx_desc_dma, ioaddr + TDFAR); -#endif + sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); + if (sh_eth_is_gether(mdp)) + sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); } } @@ -613,7 +769,6 @@ static int sh_eth_dev_init(struct net_device *ndev) { int ret = 0; struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; u_int32_t rx_int_var, tx_int_var; u32 val; @@ -623,71 +778,71 @@ static int sh_eth_dev_init(struct net_device *ndev) /* Descriptor format */ sh_eth_ring_format(ndev); if (mdp->cd->rpadir) - writel(mdp->cd->rpadir_value, ioaddr + RPADIR); + sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); /* all sh_eth int mask */ - writel(0, ioaddr + EESIPR); + sh_eth_write(ndev, 0, EESIPR); #if defined(__LITTLE_ENDIAN__) if (mdp->cd->hw_swap) - writel(EDMR_EL, ioaddr + EDMR); + sh_eth_write(ndev, EDMR_EL, EDMR); else #endif - writel(0, ioaddr + EDMR); + sh_eth_write(ndev, 0, EDMR); /* FIFO size set */ - writel(mdp->cd->fdr_value, ioaddr + FDR); - writel(0, ioaddr + TFTR); + sh_eth_write(ndev, mdp->cd->fdr_value, FDR); + sh_eth_write(ndev, 0, TFTR); /* Frame recv control */ - writel(mdp->cd->rmcr_value, ioaddr + RMCR); + sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; tx_int_var = mdp->tx_int_var = DESC_I_TINT2; - writel(rx_int_var | tx_int_var, ioaddr + TRSCER); + sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER); if (mdp->cd->bculr) - writel(0x800, ioaddr + BCULR); /* Burst sycle set */ + sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ - writel(mdp->cd->fcftr_value, ioaddr + FCFTR); + sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); if (!mdp->cd->no_trimd) - writel(0, ioaddr + TRIMD); + sh_eth_write(ndev, 0, TRIMD); /* Recv frame limit set register */ - writel(RFLR_VALUE, ioaddr + RFLR); + sh_eth_write(ndev, RFLR_VALUE, RFLR); - writel(readl(ioaddr + EESR), ioaddr + EESR); - writel(mdp->cd->eesipr_value, ioaddr + EESIPR); + sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); + sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); /* PAUSE Prohibition */ - val = (readl(ioaddr + ECMR) & ECMR_DM) | + val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; - writel(val, ioaddr + ECMR); + sh_eth_write(ndev, val, ECMR); if (mdp->cd->set_rate) mdp->cd->set_rate(ndev); /* E-MAC Status Register clear */ - writel(mdp->cd->ecsr_value, ioaddr + ECSR); + sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); /* E-MAC Interrupt Enable register */ - writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR); + sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); /* Set MAC address */ update_mac_address(ndev); /* mask reset */ if (mdp->cd->apr) - writel(APR_AP, ioaddr + APR); + sh_eth_write(ndev, APR_AP, APR); if (mdp->cd->mpr) - writel(MPR_MP, ioaddr + MPR); + sh_eth_write(ndev, MPR_MP, MPR); if (mdp->cd->tpauser) - writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER); + sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); /* Setting the Rx mode will start the Rx process. */ - writel(EDRRR_R, ioaddr + EDRRR); + sh_eth_write(ndev, EDRRR_R, EDRRR); netif_start_queue(ndev); @@ -811,24 +966,37 @@ static int sh_eth_rx(struct net_device *ndev) /* Restart Rx engine if stopped. */ /* If we don't need to check status, don't. 
-KDU */ - if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R)) - writel(EDRRR_R, ndev->base_addr + EDRRR); + if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) + sh_eth_write(ndev, EDRRR_R, EDRRR); return 0; } +static void sh_eth_rcv_snd_disable(struct net_device *ndev) +{ + /* disable tx and rx */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & + ~(ECMR_RE | ECMR_TE), ECMR); +} + +static void sh_eth_rcv_snd_enable(struct net_device *ndev) +{ + /* enable tx and rx */ + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | + (ECMR_RE | ECMR_TE), ECMR); +} + /* error control function */ static void sh_eth_error(struct net_device *ndev, int intr_status) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; u32 felic_stat; u32 link_stat; u32 mask; if (intr_status & EESR_ECI) { - felic_stat = readl(ioaddr + ECSR); - writel(felic_stat, ioaddr + ECSR); /* clear int */ + felic_stat = sh_eth_read(ndev, ECSR); + sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ if (felic_stat & ECSR_ICD) mdp->stats.tx_carrier_errors++; if (felic_stat & ECSR_LCHNG) { @@ -839,26 +1007,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) else link_stat = PHY_ST_LINK; } else { - link_stat = (readl(ioaddr + PSR)); + link_stat = (sh_eth_read(ndev, PSR)); if (mdp->ether_link_active_low) link_stat = ~link_stat; } - if (!(link_stat & PHY_ST_LINK)) { - /* Link Down : disable tx and rx */ - writel(readl(ioaddr + ECMR) & - ~(ECMR_RE | ECMR_TE), ioaddr + ECMR); - } else { + if (!(link_stat & PHY_ST_LINK)) + sh_eth_rcv_snd_disable(ndev); + else { /* Link Up */ - writel(readl(ioaddr + EESIPR) & - ~DMAC_M_ECI, ioaddr + EESIPR); + sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & + ~DMAC_M_ECI, EESIPR); /*clear int */ - writel(readl(ioaddr + ECSR), - ioaddr + ECSR); - writel(readl(ioaddr + EESIPR) | - DMAC_M_ECI, ioaddr + EESIPR); + sh_eth_write(ndev, sh_eth_read(ndev, ECSR), + ECSR); + sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | + DMAC_M_ECI, EESIPR); /* enable tx and rx */ - writel(readl(ioaddr + ECMR) | - (ECMR_RE | ECMR_TE), ioaddr + ECMR); + sh_eth_rcv_snd_enable(ndev); } } } @@ -867,6 +1032,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) /* Write buck end. 
unused write back interrupt */ if (intr_status & EESR_TABT) /* Transmit Abort int */ mdp->stats.tx_aborted_errors++; + if (netif_msg_tx_err(mdp)) + dev_err(&ndev->dev, "Transmit Abort\n"); } if (intr_status & EESR_RABT) { @@ -874,28 +1041,47 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) if (intr_status & EESR_RFRMER) { /* Receive Frame Overflow int */ mdp->stats.rx_frame_errors++; - dev_err(&ndev->dev, "Receive Frame Overflow\n"); + if (netif_msg_rx_err(mdp)) + dev_err(&ndev->dev, "Receive Abort\n"); } } - if (!mdp->cd->no_ade) { - if (intr_status & EESR_ADE && intr_status & EESR_TDE && - intr_status & EESR_TFE) - mdp->stats.tx_fifo_errors++; + if (intr_status & EESR_TDE) { + /* Transmit Descriptor Empty int */ + mdp->stats.tx_fifo_errors++; + if (netif_msg_tx_err(mdp)) + dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); + } + + if (intr_status & EESR_TFE) { + /* FIFO under flow */ + mdp->stats.tx_fifo_errors++; + if (netif_msg_tx_err(mdp)) + dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); } if (intr_status & EESR_RDE) { /* Receive Descriptor Empty int */ mdp->stats.rx_over_errors++; - if (readl(ioaddr + EDRRR) ^ EDRRR_R) - writel(EDRRR_R, ioaddr + EDRRR); - dev_err(&ndev->dev, "Receive Descriptor Empty\n"); + if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R) + sh_eth_write(ndev, EDRRR_R, EDRRR); + if (netif_msg_rx_err(mdp)) + dev_err(&ndev->dev, "Receive Descriptor Empty\n"); } + if (intr_status & EESR_RFE) { /* Receive FIFO Overflow int */ mdp->stats.rx_fifo_errors++; - dev_err(&ndev->dev, "Receive FIFO Overflow\n"); + if (netif_msg_rx_err(mdp)) + dev_err(&ndev->dev, "Receive FIFO Overflow\n"); + } + + if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { + /* Address Error */ + mdp->stats.tx_fifo_errors++; + if (netif_msg_tx_err(mdp)) + dev_err(&ndev->dev, "Address Error\n"); } mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; @@ -903,7 +1089,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) mask &= ~EESR_ADE; if (intr_status & mask) { /* Tx error */ - u32 edtrr = readl(ndev->base_addr + EDTRR); + u32 edtrr = sh_eth_read(ndev, EDTRR); /* dmesg */ dev_err(&ndev->dev, "TX error. 
status=%8.8x cur_tx=%8.8x ", intr_status, mdp->cur_tx); @@ -913,9 +1099,9 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) sh_eth_txfree(ndev); /* SH7712 BUG */ - if (edtrr ^ EDTRR_TRNS) { + if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) { /* tx dma start */ - writel(EDTRR_TRNS, ndev->base_addr + EDTRR); + sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); } /* wakeup */ netif_wake_queue(ndev); @@ -928,18 +1114,17 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_cpu_data *cd = mdp->cd; irqreturn_t ret = IRQ_NONE; - u32 ioaddr, intr_status = 0; + u32 intr_status = 0; - ioaddr = ndev->base_addr; spin_lock(&mdp->lock); /* Get interrpt stat */ - intr_status = readl(ioaddr + EESR); + intr_status = sh_eth_read(ndev, EESR); /* Clear interrupt */ if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | cd->tx_check | cd->eesr_err_check)) { - writel(intr_status, ioaddr + EESR); + sh_eth_write(ndev, intr_status, EESR); ret = IRQ_HANDLED; } else goto other_irq; @@ -982,7 +1167,6 @@ static void sh_eth_adjust_link(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); struct phy_device *phydev = mdp->phydev; - u32 ioaddr = ndev->base_addr; int new_state = 0; if (phydev->link != PHY_DOWN) { @@ -1000,8 +1184,8 @@ static void sh_eth_adjust_link(struct net_device *ndev) mdp->cd->set_rate(ndev); } if (mdp->link == PHY_DOWN) { - writel((readl(ioaddr + ECMR) & ~ECMR_TXF) - | ECMR_DM, ioaddr + ECMR); + sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF) + | ECMR_DM, ECMR); new_state = 1; mdp->link = phydev->link; } @@ -1012,7 +1196,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) mdp->duplex = -1; } - if (new_state) + if (new_state && netif_msg_link(mdp)) phy_print_status(phydev); } @@ -1032,7 +1216,7 @@ static int sh_eth_phy_init(struct net_device *ndev) /* Try connect to PHY */ phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, - 0, PHY_INTERFACE_MODE_MII); + 0, mdp->phy_interface); if (IS_ERR(phydev)) { dev_err(&ndev->dev, "phy_connect failed\n"); return PTR_ERR(phydev); @@ -1063,6 +1247,131 @@ static int sh_eth_phy_start(struct net_device *ndev) return 0; } +static int sh_eth_get_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&mdp->lock, flags); + ret = phy_ethtool_gset(mdp->phydev, ecmd); + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +static int sh_eth_set_settings(struct net_device *ndev, + struct ethtool_cmd *ecmd) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&mdp->lock, flags); + + /* disable tx and rx */ + sh_eth_rcv_snd_disable(ndev); + + ret = phy_ethtool_sset(mdp->phydev, ecmd); + if (ret) + goto error_exit; + + if (ecmd->duplex == DUPLEX_FULL) + mdp->duplex = 1; + else + mdp->duplex = 0; + + if (mdp->cd->set_duplex) + mdp->cd->set_duplex(ndev); + +error_exit: + mdelay(1); + + /* enable tx and rx */ + sh_eth_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +static int sh_eth_nway_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&mdp->lock, flags); + ret = phy_start_aneg(mdp->phydev); + spin_unlock_irqrestore(&mdp->lock, flags); + + return ret; +} + +static u32 sh_eth_get_msglevel(struct 
net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + return mdp->msg_enable; +} + +static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + mdp->msg_enable = value; +} + +static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { + "rx_current", "tx_current", + "rx_dirty", "tx_dirty", +}; +#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) + +static int sh_eth_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return SH_ETH_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void sh_eth_get_ethtool_stats(struct net_device *ndev, + struct ethtool_stats *stats, u64 *data) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int i = 0; + + /* device-specific stats */ + data[i++] = mdp->cur_rx; + data[i++] = mdp->cur_tx; + data[i++] = mdp->dirty_rx; + data[i++] = mdp->dirty_tx; +} + +static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, *sh_eth_gstrings_stats, + sizeof(sh_eth_gstrings_stats)); + break; + } +} + +static struct ethtool_ops sh_eth_ethtool_ops = { + .get_settings = sh_eth_get_settings, + .set_settings = sh_eth_set_settings, + .nway_reset = sh_eth_nway_reset, + .get_msglevel = sh_eth_get_msglevel, + .set_msglevel = sh_eth_set_msglevel, + .get_link = ethtool_op_get_link, + .get_strings = sh_eth_get_strings, + .get_ethtool_stats = sh_eth_get_ethtool_stats, + .get_sset_count = sh_eth_get_sset_count, +}; + /* network device open function */ static int sh_eth_open(struct net_device *ndev) { @@ -1073,8 +1382,8 @@ static int sh_eth_open(struct net_device *ndev) ret = request_irq(ndev->irq, sh_eth_interrupt, #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7764) || \ - defined(CONFIG_CPU_SUBTYPE_SH7757) + defined(CONFIG_CPU_SUBTYPE_SH7764) || \ + defined(CONFIG_CPU_SUBTYPE_SH7757) IRQF_SHARED, #else 0, @@ -1117,15 +1426,14 @@ out_free_irq: static void sh_eth_tx_timeout(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; struct sh_eth_rxdesc *rxdesc; int i; netif_stop_queue(ndev); - /* worning message out. 
*/ - printk(KERN_WARNING "%s: transmit timed out, status %8.8x," - " resetting...\n", ndev->name, (int)readl(ioaddr + EESR)); + if (netif_msg_timer(mdp)) + dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," + " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); /* tx_errors count up */ mdp->stats.tx_errors++; @@ -1167,6 +1475,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) spin_lock_irqsave(&mdp->lock, flags); if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { if (!sh_eth_txfree(ndev)) { + if (netif_msg_tx_queued(mdp)) + dev_warn(&ndev->dev, "TxFD exhausted.\n"); netif_stop_queue(ndev); spin_unlock_irqrestore(&mdp->lock, flags); return NETDEV_TX_BUSY; @@ -1196,8 +1506,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) mdp->cur_tx++; - if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS)) - writel(EDTRR_TRNS, ndev->base_addr + EDTRR); + if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp))) + sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR); return NETDEV_TX_OK; } @@ -1206,17 +1516,16 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) static int sh_eth_close(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; int ringsize; netif_stop_queue(ndev); /* Disable interrupts by clearing the interrupt mask. */ - writel(0x0000, ioaddr + EESIPR); + sh_eth_write(ndev, 0x0000, EESIPR); /* Stop the chip's Tx and Rx processes. */ - writel(0, ioaddr + EDTRR); - writel(0, ioaddr + EDRRR); + sh_eth_write(ndev, 0, EDTRR); + sh_eth_write(ndev, 0, EDRRR); /* PHY Disconnect */ if (mdp->phydev) { @@ -1247,25 +1556,24 @@ static int sh_eth_close(struct net_device *ndev) static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - u32 ioaddr = ndev->base_addr; pm_runtime_get_sync(&mdp->pdev->dev); - mdp->stats.tx_dropped += readl(ioaddr + TROCR); - writel(0, ioaddr + TROCR); /* (write clear) */ - mdp->stats.collisions += readl(ioaddr + CDCR); - writel(0, ioaddr + CDCR); /* (write clear) */ - mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR); - writel(0, ioaddr + LCCR); /* (write clear) */ -#if defined(CONFIG_CPU_SUBTYPE_SH7763) - mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */ - writel(0, ioaddr + CERCR); /* (write clear) */ - mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */ - writel(0, ioaddr + CEECR); /* (write clear) */ -#else - mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR); - writel(0, ioaddr + CNDCR); /* (write clear) */ -#endif + mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR); + sh_eth_write(ndev, 0, TROCR); /* (write clear) */ + mdp->stats.collisions += sh_eth_read(ndev, CDCR); + sh_eth_write(ndev, 0, CDCR); /* (write clear) */ + mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); + sh_eth_write(ndev, 0, LCCR); /* (write clear) */ + if (sh_eth_is_gether(mdp)) { + mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR); + sh_eth_write(ndev, 0, CERCR); /* (write clear) */ + mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR); + sh_eth_write(ndev, 0, CEECR); /* (write clear) */ + } else { + mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); + sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ + } pm_runtime_put_sync(&mdp->pdev->dev); return &mdp->stats; @@ -1291,48 +1599,46 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, /* Multicast reception directions set */ static void 
sh_eth_set_multicast_list(struct net_device *ndev) { - u32 ioaddr = ndev->base_addr; - if (ndev->flags & IFF_PROMISC) { /* Set promiscuous. */ - writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM, - ioaddr + ECMR); + sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) | + ECMR_PRM, ECMR); } else { /* Normal, unicast/broadcast-only mode. */ - writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT, - ioaddr + ECMR); + sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | + ECMR_MCT, ECMR); } } +#endif /* SH_ETH_HAS_TSU */ /* SuperH's TSU register init function */ -static void sh_eth_tsu_init(u32 ioaddr) -{ - writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */ - writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */ - writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */ - writel(0xc, ioaddr + TSU_BSYSL0); - writel(0xc, ioaddr + TSU_BSYSL1); - writel(0, ioaddr + TSU_PRISL0); - writel(0, ioaddr + TSU_PRISL1); - writel(0, ioaddr + TSU_FWSL0); - writel(0, ioaddr + TSU_FWSL1); - writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); -#if defined(CONFIG_CPU_SUBTYPE_SH7763) - writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */ - writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */ -#else - writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ - writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ -#endif - writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ - writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ - writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ - writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */ - writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */ - writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */ - writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */ +static void sh_eth_tsu_init(struct sh_eth_private *mdp) +{ + sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ + sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ + sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); + sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); + sh_eth_tsu_write(mdp, 0, TSU_PRISL0); + sh_eth_tsu_write(mdp, 0, TSU_PRISL1); + sh_eth_tsu_write(mdp, 0, TSU_FWSL0); + sh_eth_tsu_write(mdp, 0, TSU_FWSL1); + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); + if (sh_eth_is_gether(mdp)) { + sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ + } else { + sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ + sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ + } + sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ + sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ + sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ + sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ + sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ + sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ + sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ } -#endif /* SH_ETH_HAS_TSU */ /* MDIO bus release function */ static int sh_mdio_release(struct net_device *ndev) @@ -1355,7 +1661,8 @@ static int sh_mdio_release(struct net_device *ndev) } /* MDIO bus init function */ -static int sh_mdio_init(struct net_device *ndev, int id) +static int sh_mdio_init(struct net_device *ndev, int id, + struct sh_eth_plat_data *pd) { int ret, 
i; struct bb_info *bitbang; @@ -1369,7 +1676,8 @@ static int sh_mdio_init(struct net_device *ndev, int id) } /* bitbang init */ - bitbang->addr = ndev->base_addr + PIR; + bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR]; + bitbang->set_gate = pd->set_mdio_gate; bitbang->mdi_msk = 0x08; bitbang->mdo_msk = 0x04; bitbang->mmd_msk = 0x02;/* MMD */ @@ -1420,6 +1728,28 @@ out: return ret; } +static const u16 *sh_eth_get_register_offset(int register_type) +{ + const u16 *reg_offset = NULL; + + switch (register_type) { + case SH_ETH_REG_GIGABIT: + reg_offset = sh_eth_offset_gigabit; + break; + case SH_ETH_REG_FAST_SH4: + reg_offset = sh_eth_offset_fast_sh4; + break; + case SH_ETH_REG_FAST_SH3_SH2: + reg_offset = sh_eth_offset_fast_sh3_sh2; + break; + default: + printk(KERN_ERR "Unknown register type (%d)\n", register_type); + break; + } + + return reg_offset; +} + static const struct net_device_ops sh_eth_netdev_ops = { .ndo_open = sh_eth_open, .ndo_stop = sh_eth_close, @@ -1486,19 +1816,28 @@ static int sh_eth_drv_probe(struct platform_device *pdev) pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); /* get PHY ID */ mdp->phy_id = pd->phy; + mdp->phy_interface = pd->phy_interface; /* EDMAC endian */ mdp->edmac_endian = pd->edmac_endian; mdp->no_ether_link = pd->no_ether_link; mdp->ether_link_active_low = pd->ether_link_active_low; + mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); /* set cpu data */ +#if defined(SH_ETH_HAS_BOTH_MODULES) + mdp->cd = sh_eth_get_cpu_data(mdp); +#else mdp->cd = &sh_eth_my_cpu_data; +#endif sh_eth_set_default_cpu_data(mdp->cd); /* set function */ ndev->netdev_ops = &sh_eth_netdev_ops; + SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); ndev->watchdog_timeo = TX_TIMEOUT; + /* debug message level */ + mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; mdp->post_rx = POST_RX >> (devno << 1); mdp->post_fw = POST_FW >> (devno << 1); @@ -1507,13 +1846,23 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* First device only init */ if (!devno) { + if (mdp->cd->tsu) { + struct resource *rtsu; + rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!rtsu) { + dev_err(&pdev->dev, "Not found TSU resource\n"); + goto out_release; + } + mdp->tsu_addr = ioremap(rtsu->start, + resource_size(rtsu)); + } if (mdp->cd->chip_reset) mdp->cd->chip_reset(ndev); -#if defined(SH_ETH_HAS_TSU) - /* TSU init (Init only)*/ - sh_eth_tsu_init(SH_TSU_ADDR); -#endif + if (mdp->cd->tsu) { + /* TSU init (Init only)*/ + sh_eth_tsu_init(mdp); + } } /* network device register */ @@ -1522,7 +1871,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) goto out_release; /* mdio bus init */ - ret = sh_mdio_init(ndev, pdev->id); + ret = sh_mdio_init(ndev, pdev->id, pd); if (ret) goto out_unregister; @@ -1539,6 +1888,8 @@ out_unregister: out_release: /* net_dev free */ + if (mdp->tsu_addr) + iounmap(mdp->tsu_addr); if (ndev) free_netdev(ndev); @@ -1549,7 +1900,9 @@ out: static int sh_eth_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct sh_eth_private *mdp = netdev_priv(ndev); + iounmap(mdp->tsu_addr); sh_mdio_release(ndev); unregister_netdev(ndev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h index efa64221eede..c3048a6ba676 100644 --- a/drivers/net/sh_eth.h +++ b/drivers/net/sh_eth.h @@ -2,7 +2,7 @@ * SuperH Ethernet device driver * * Copyright (C) 2006-2008 Nobuhiro Iwamatsu - * Copyright (C) 2008-2009 Renesas Solutions Corp. 
+ * Copyright (C) 2008-2011 Renesas Solutions Corp. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,278 +38,340 @@ #define ETHERSMALL 60 #define PKT_BUF_SZ 1538 -#if defined(CONFIG_CPU_SUBTYPE_SH7763) -/* This CPU register maps is very difference by other SH4 CPU */ - -/* Chip Base Address */ -# define SH_TSU_ADDR 0xFEE01800 -# define ARSTR SH_TSU_ADDR - -/* Chip Registers */ -/* E-DMAC */ -# define EDSR 0x000 -# define EDMR 0x400 -# define EDTRR 0x408 -# define EDRRR 0x410 -# define EESR 0x428 -# define EESIPR 0x430 -# define TDLAR 0x010 -# define TDFAR 0x014 -# define TDFXR 0x018 -# define TDFFR 0x01C -# define RDLAR 0x030 -# define RDFAR 0x034 -# define RDFXR 0x038 -# define RDFFR 0x03C -# define TRSCER 0x438 -# define RMFCR 0x440 -# define TFTR 0x448 -# define FDR 0x450 -# define RMCR 0x458 -# define RPADIR 0x460 -# define FCFTR 0x468 - -/* Ether Register */ -# define ECMR 0x500 -# define ECSR 0x510 -# define ECSIPR 0x518 -# define PIR 0x520 -# define PSR 0x528 -# define PIPR 0x52C -# define RFLR 0x508 -# define APR 0x554 -# define MPR 0x558 -# define PFTCR 0x55C -# define PFRCR 0x560 -# define TPAUSER 0x564 -# define GECMR 0x5B0 -# define BCULR 0x5B4 -# define MAHR 0x5C0 -# define MALR 0x5C8 -# define TROCR 0x700 -# define CDCR 0x708 -# define LCCR 0x710 -# define CEFCR 0x740 -# define FRECR 0x748 -# define TSFRCR 0x750 -# define TLFRCR 0x758 -# define RFCR 0x760 -# define CERCR 0x768 -# define CEECR 0x770 -# define MAFCR 0x778 - -/* TSU Absolute Address */ -# define TSU_CTRST 0x004 -# define TSU_FWEN0 0x010 -# define TSU_FWEN1 0x014 -# define TSU_FCM 0x18 -# define TSU_BSYSL0 0x20 -# define TSU_BSYSL1 0x24 -# define TSU_PRISL0 0x28 -# define TSU_PRISL1 0x2C -# define TSU_FWSL0 0x30 -# define TSU_FWSL1 0x34 -# define TSU_FWSLC 0x38 -# define TSU_QTAG0 0x40 -# define TSU_QTAG1 0x44 -# define TSU_FWSR 0x50 -# define TSU_FWINMK 0x54 -# define TSU_ADQT0 0x48 -# define TSU_ADQT1 0x4C -# define TSU_VTAG0 0x58 -# define TSU_VTAG1 0x5C -# define TSU_ADSBSY 0x60 -# define TSU_TEN 0x64 -# define TSU_POST1 0x70 -# define TSU_POST2 0x74 -# define TSU_POST3 0x78 -# define TSU_POST4 0x7C -# define TSU_ADRH0 0x100 -# define TSU_ADRL0 0x104 -# define TSU_ADRH31 0x1F8 -# define TSU_ADRL31 0x1FC - -# define TXNLCR0 0x80 -# define TXALCR0 0x84 -# define RXNLCR0 0x88 -# define RXALCR0 0x8C -# define FWNLCR0 0x90 -# define FWALCR0 0x94 -# define TXNLCR1 0xA0 -# define TXALCR1 0xA4 -# define RXNLCR1 0xA8 -# define RXALCR1 0xAC -# define FWNLCR1 0xB0 -# define FWALCR1 0x40 - -#elif defined(CONFIG_CPU_SH4) /* #if defined(CONFIG_CPU_SUBTYPE_SH7763) */ -/* EtherC */ -#define ECMR 0x100 -#define RFLR 0x108 -#define ECSR 0x110 -#define ECSIPR 0x118 -#define PIR 0x120 -#define PSR 0x128 -#define RDMLR 0x140 -#define IPGR 0x150 -#define APR 0x154 -#define MPR 0x158 -#define TPAUSER 0x164 -#define RFCF 0x160 -#define TPAUSECR 0x168 -#define BCFRR 0x16c -#define MAHR 0x1c0 -#define MALR 0x1c8 -#define TROCR 0x1d0 -#define CDCR 0x1d4 -#define LCCR 0x1d8 -#define CNDCR 0x1dc -#define CEFCR 0x1e4 -#define FRECR 0x1e8 -#define TSFRCR 0x1ec -#define TLFRCR 0x1f0 -#define RFCR 0x1f4 -#define MAFCR 0x1f8 -#define RTRATE 0x1fc - -/* E-DMAC */ -#define EDMR 0x000 -#define EDTRR 0x008 -#define EDRRR 0x010 -#define TDLAR 0x018 -#define RDLAR 0x020 -#define EESR 0x028 -#define EESIPR 0x030 -#define TRSCER 0x038 -#define RMFCR 0x040 -#define TFTR 0x048 -#define FDR 0x050 -#define RMCR 0x058 -#define TFUCR 0x064 -#define RFOCR 
0x068 -#define FCFTR 0x070 -#define RPADIR 0x078 -#define TRIMD 0x07c -#define RBWAR 0x0c8 -#define RDFAR 0x0cc -#define TBRAR 0x0d4 -#define TDFAR 0x0d8 -#else /* #elif defined(CONFIG_CPU_SH4) */ -/* This section is SH3 or SH2 */ -#ifndef CONFIG_CPU_SUBTYPE_SH7619 -/* Chip base address */ -# define SH_TSU_ADDR 0xA7000804 -# define ARSTR 0xA7000800 -#endif -/* Chip Registers */ -/* E-DMAC */ -# define EDMR 0x0000 -# define EDTRR 0x0004 -# define EDRRR 0x0008 -# define TDLAR 0x000C -# define RDLAR 0x0010 -# define EESR 0x0014 -# define EESIPR 0x0018 -# define TRSCER 0x001C -# define RMFCR 0x0020 -# define TFTR 0x0024 -# define FDR 0x0028 -# define RMCR 0x002C -# define EDOCR 0x0030 -# define FCFTR 0x0034 -# define RPADIR 0x0038 -# define TRIMD 0x003C -# define RBWAR 0x0040 -# define RDFAR 0x0044 -# define TBRAR 0x004C -# define TDFAR 0x0050 - -/* Ether Register */ -# define ECMR 0x0160 -# define ECSR 0x0164 -# define ECSIPR 0x0168 -# define PIR 0x016C -# define MAHR 0x0170 -# define MALR 0x0174 -# define RFLR 0x0178 -# define PSR 0x017C -# define TROCR 0x0180 -# define CDCR 0x0184 -# define LCCR 0x0188 -# define CNDCR 0x018C -# define CEFCR 0x0194 -# define FRECR 0x0198 -# define TSFRCR 0x019C -# define TLFRCR 0x01A0 -# define RFCR 0x01A4 -# define MAFCR 0x01A8 -# define IPGR 0x01B4 -# if defined(CONFIG_CPU_SUBTYPE_SH7710) -# define APR 0x01B8 -# define MPR 0x01BC -# define TPAUSER 0x1C4 -# define BCFR 0x1CC -# endif /* CONFIG_CPU_SH7710 */ - -/* TSU */ -# define TSU_CTRST 0x004 -# define TSU_FWEN0 0x010 -# define TSU_FWEN1 0x014 -# define TSU_FCM 0x018 -# define TSU_BSYSL0 0x020 -# define TSU_BSYSL1 0x024 -# define TSU_PRISL0 0x028 -# define TSU_PRISL1 0x02C -# define TSU_FWSL0 0x030 -# define TSU_FWSL1 0x034 -# define TSU_FWSLC 0x038 -# define TSU_QTAGM0 0x040 -# define TSU_QTAGM1 0x044 -# define TSU_ADQT0 0x048 -# define TSU_ADQT1 0x04C -# define TSU_FWSR 0x050 -# define TSU_FWINMK 0x054 -# define TSU_ADSBSY 0x060 -# define TSU_TEN 0x064 -# define TSU_POST1 0x070 -# define TSU_POST2 0x074 -# define TSU_POST3 0x078 -# define TSU_POST4 0x07C -# define TXNLCR0 0x080 -# define TXALCR0 0x084 -# define RXNLCR0 0x088 -# define RXALCR0 0x08C -# define FWNLCR0 0x090 -# define FWALCR0 0x094 -# define TXNLCR1 0x0A0 -# define TXALCR1 0x0A4 -# define RXNLCR1 0x0A8 -# define RXALCR1 0x0AC -# define FWNLCR1 0x0B0 -# define FWALCR1 0x0B4 - -#define TSU_ADRH0 0x0100 -#define TSU_ADRL0 0x0104 -#define TSU_ADRL31 0x01FC - -#endif /* CONFIG_CPU_SUBTYPE_SH7763 */ - -/* There are avoid compile error... 
*/ -#if !defined(BCULR) -#define BCULR 0x0fc -#endif -#if !defined(TRIMD) -#define TRIMD 0x0fc -#endif -#if !defined(APR) -#define APR 0x0fc -#endif -#if !defined(MPR) -#define MPR 0x0fc -#endif -#if !defined(TPAUSER) -#define TPAUSER 0x0fc -#endif +enum { + /* E-DMAC registers */ + EDSR = 0, + EDMR, + EDTRR, + EDRRR, + EESR, + EESIPR, + TDLAR, + TDFAR, + TDFXR, + TDFFR, + RDLAR, + RDFAR, + RDFXR, + RDFFR, + TRSCER, + RMFCR, + TFTR, + FDR, + RMCR, + EDOCR, + TFUCR, + RFOCR, + FCFTR, + RPADIR, + TRIMD, + RBWAR, + TBRAR, + + /* Ether registers */ + ECMR, + ECSR, + ECSIPR, + PIR, + PSR, + RDMLR, + PIPR, + RFLR, + IPGR, + APR, + MPR, + PFTCR, + PFRCR, + RFCR, + RFCF, + TPAUSER, + TPAUSECR, + BCFR, + BCFRR, + GECMR, + BCULR, + MAHR, + MALR, + TROCR, + CDCR, + LCCR, + CNDCR, + CEFCR, + FRECR, + TSFRCR, + TLFRCR, + CERCR, + CEECR, + MAFCR, + RTRATE, + + /* TSU Absolute address */ + ARSTR, + TSU_CTRST, + TSU_FWEN0, + TSU_FWEN1, + TSU_FCM, + TSU_BSYSL0, + TSU_BSYSL1, + TSU_PRISL0, + TSU_PRISL1, + TSU_FWSL0, + TSU_FWSL1, + TSU_FWSLC, + TSU_QTAG0, + TSU_QTAG1, + TSU_QTAGM0, + TSU_QTAGM1, + TSU_FWSR, + TSU_FWINMK, + TSU_ADQT0, + TSU_ADQT1, + TSU_VTAG0, + TSU_VTAG1, + TSU_ADSBSY, + TSU_TEN, + TSU_POST1, + TSU_POST2, + TSU_POST3, + TSU_POST4, + TSU_ADRH0, + TSU_ADRL0, + TSU_ADRH31, + TSU_ADRL31, + + TXNLCR0, + TXALCR0, + RXNLCR0, + RXALCR0, + FWNLCR0, + FWALCR0, + TXNLCR1, + TXALCR1, + RXNLCR1, + RXALCR1, + FWNLCR1, + FWALCR1, + + /* This value must be written at last. */ + SH_ETH_MAX_REGISTER_OFFSET, +}; + +static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { + [EDSR] = 0x0000, + [EDMR] = 0x0400, + [EDTRR] = 0x0408, + [EDRRR] = 0x0410, + [EESR] = 0x0428, + [EESIPR] = 0x0430, + [TDLAR] = 0x0010, + [TDFAR] = 0x0014, + [TDFXR] = 0x0018, + [TDFFR] = 0x001c, + [RDLAR] = 0x0030, + [RDFAR] = 0x0034, + [RDFXR] = 0x0038, + [RDFFR] = 0x003c, + [TRSCER] = 0x0438, + [RMFCR] = 0x0440, + [TFTR] = 0x0448, + [FDR] = 0x0450, + [RMCR] = 0x0458, + [RPADIR] = 0x0460, + [FCFTR] = 0x0468, + + [ECMR] = 0x0500, + [ECSR] = 0x0510, + [ECSIPR] = 0x0518, + [PIR] = 0x0520, + [PSR] = 0x0528, + [PIPR] = 0x052c, + [RFLR] = 0x0508, + [APR] = 0x0554, + [MPR] = 0x0558, + [PFTCR] = 0x055c, + [PFRCR] = 0x0560, + [TPAUSER] = 0x0564, + [GECMR] = 0x05b0, + [BCULR] = 0x05b4, + [MAHR] = 0x05c0, + [MALR] = 0x05c8, + [TROCR] = 0x0700, + [CDCR] = 0x0708, + [LCCR] = 0x0710, + [CEFCR] = 0x0740, + [FRECR] = 0x0748, + [TSFRCR] = 0x0750, + [TLFRCR] = 0x0758, + [RFCR] = 0x0760, + [CERCR] = 0x0768, + [CEECR] = 0x0770, + [MAFCR] = 0x0778, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAG0] = 0x0040, + [TSU_QTAG1] = 0x0044, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_VTAG0] = 0x0058, + [TSU_VTAG1] = 0x005c, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + [TSU_ADRH0] = 0x0100, + [TSU_ADRL0] = 0x0104, + [TSU_ADRH31] = 0x01f8, + [TSU_ADRL31] = 0x01fc, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a0, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, +}; + +static const u16 
sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = { + [ECMR] = 0x0100, + [RFLR] = 0x0108, + [ECSR] = 0x0110, + [ECSIPR] = 0x0118, + [PIR] = 0x0120, + [PSR] = 0x0128, + [RDMLR] = 0x0140, + [IPGR] = 0x0150, + [APR] = 0x0154, + [MPR] = 0x0158, + [TPAUSER] = 0x0164, + [RFCF] = 0x0160, + [TPAUSECR] = 0x0168, + [BCFRR] = 0x016c, + [MAHR] = 0x01c0, + [MALR] = 0x01c8, + [TROCR] = 0x01d0, + [CDCR] = 0x01d4, + [LCCR] = 0x01d8, + [CNDCR] = 0x01dc, + [CEFCR] = 0x01e4, + [FRECR] = 0x01e8, + [TSFRCR] = 0x01ec, + [TLFRCR] = 0x01f0, + [RFCR] = 0x01f4, + [MAFCR] = 0x01f8, + [RTRATE] = 0x01fc, + + [EDMR] = 0x0000, + [EDTRR] = 0x0008, + [EDRRR] = 0x0010, + [TDLAR] = 0x0018, + [RDLAR] = 0x0020, + [EESR] = 0x0028, + [EESIPR] = 0x0030, + [TRSCER] = 0x0038, + [RMFCR] = 0x0040, + [TFTR] = 0x0048, + [FDR] = 0x0050, + [RMCR] = 0x0058, + [TFUCR] = 0x0064, + [RFOCR] = 0x0068, + [FCFTR] = 0x0070, + [RPADIR] = 0x0078, + [TRIMD] = 0x007c, + [RBWAR] = 0x00c8, + [RDFAR] = 0x00cc, + [TBRAR] = 0x00d4, + [TDFAR] = 0x00d8, +}; + +static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { + [ECMR] = 0x0160, + [ECSR] = 0x0164, + [ECSIPR] = 0x0168, + [PIR] = 0x016c, + [MAHR] = 0x0170, + [MALR] = 0x0174, + [RFLR] = 0x0178, + [PSR] = 0x017c, + [TROCR] = 0x0180, + [CDCR] = 0x0184, + [LCCR] = 0x0188, + [CNDCR] = 0x018c, + [CEFCR] = 0x0194, + [FRECR] = 0x0198, + [TSFRCR] = 0x019c, + [TLFRCR] = 0x01a0, + [RFCR] = 0x01a4, + [MAFCR] = 0x01a8, + [IPGR] = 0x01b4, + [APR] = 0x01b8, + [MPR] = 0x01bc, + [TPAUSER] = 0x01c4, + [BCFR] = 0x01cc, + + [ARSTR] = 0x0000, + [TSU_CTRST] = 0x0004, + [TSU_FWEN0] = 0x0010, + [TSU_FWEN1] = 0x0014, + [TSU_FCM] = 0x0018, + [TSU_BSYSL0] = 0x0020, + [TSU_BSYSL1] = 0x0024, + [TSU_PRISL0] = 0x0028, + [TSU_PRISL1] = 0x002c, + [TSU_FWSL0] = 0x0030, + [TSU_FWSL1] = 0x0034, + [TSU_FWSLC] = 0x0038, + [TSU_QTAGM0] = 0x0040, + [TSU_QTAGM1] = 0x0044, + [TSU_ADQT0] = 0x0048, + [TSU_ADQT1] = 0x004c, + [TSU_FWSR] = 0x0050, + [TSU_FWINMK] = 0x0054, + [TSU_ADSBSY] = 0x0060, + [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, + + [TXNLCR0] = 0x0080, + [TXALCR0] = 0x0084, + [RXNLCR0] = 0x0088, + [RXALCR0] = 0x008c, + [FWNLCR0] = 0x0090, + [FWALCR0] = 0x0094, + [TXNLCR1] = 0x00a0, + [TXALCR1] = 0x00a0, + [RXNLCR1] = 0x00a8, + [RXALCR1] = 0x00ac, + [FWNLCR1] = 0x00b0, + [FWALCR1] = 0x00b4, + + [TSU_ADRH0] = 0x0100, + [TSU_ADRL0] = 0x0104, + [TSU_ADRL31] = 0x01fc, + +}; /* Driver's parameters */ #if defined(CONFIG_CPU_SH4) @@ -338,20 +400,14 @@ enum GECMR_BIT { enum DMAC_M_BIT { EDMR_EL = 0x40, /* Litte endian */ EDMR_DL1 = 0x20, EDMR_DL0 = 0x10, -#ifdef CONFIG_CPU_SUBTYPE_SH7763 - EDMR_SRST = 0x03, -#else /* CONFIG_CPU_SUBTYPE_SH7763 */ - EDMR_SRST = 0x01, -#endif + EDMR_SRST_GETHER = 0x03, + EDMR_SRST_ETHER = 0x01, }; /* EDTRR */ enum DMAC_T_BIT { -#ifdef CONFIG_CPU_SUBTYPE_SH7763 - EDTRR_TRNS = 0x03, -#else - EDTRR_TRNS = 0x01, -#endif + EDTRR_TRNS_GETHER = 0x03, + EDTRR_TRNS_ETHER = 0x01, }; /* EDRRR*/ @@ -695,6 +751,7 @@ struct sh_eth_cpu_data { unsigned mpr:1; /* EtherC have MPR */ unsigned tpauser:1; /* EtherC have TPAUSER */ unsigned bculr:1; /* EtherC have BCULR */ + unsigned tsu:1; /* EtherC have TSU */ unsigned hw_swap:1; /* E-DMAC have DE bit in EDMR */ unsigned rpadir:1; /* E-DMAC have RPADIR */ unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */ @@ -704,6 +761,8 @@ struct sh_eth_cpu_data { struct sh_eth_private { struct platform_device *pdev; struct sh_eth_cpu_data *cd; + const u16 *reg_offset; + void __iomem *tsu_addr; dma_addr_t 
rx_desc_dma; dma_addr_t tx_desc_dma; struct sh_eth_rxdesc *rx_ring; @@ -722,6 +781,7 @@ struct sh_eth_private { struct mii_bus *mii_bus; /* MDIO bus control */ struct phy_device *phydev; /* PHY device control */ enum phy_state link; + phy_interface_t phy_interface; int msg_enable; int speed; int duplex; @@ -746,4 +806,32 @@ static inline void sh_eth_soft_swap(char *src, int len) #endif } +static inline void sh_eth_write(struct net_device *ndev, unsigned long data, + int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + writel(data, ndev->base_addr + mdp->reg_offset[enum_index]); +} + +static inline unsigned long sh_eth_read(struct net_device *ndev, + int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + + return readl(ndev->base_addr + mdp->reg_offset[enum_index]); +} + +static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, + unsigned long data, int enum_index) +{ + writel(data, mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + +static inline unsigned long sh_eth_tsu_read(struct sh_eth_private *mdp, + int enum_index) +{ + return readl(mdp->tsu_addr + mdp->reg_offset[enum_index]); +} + #endif /* #ifndef __SH_ETH_H__ */ diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 640e368ebeee..84d4167eee9a 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -495,7 +495,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, sis_priv->mii_info.reg_num_mask = 0x1f; /* Get Mac address according to the chip revision */ - pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &(sis_priv->chipset_rev)); + sis_priv->chipset_rev = pci_dev->revision; if(netif_msg_probe(sis_priv)) printk(KERN_DEBUG "%s: detected revision %2.2x, " "trying to get MAC address...\n", @@ -532,7 +532,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev, /* save our host bridge revision */ dev = pci_get_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_630, NULL); if (dev) { - pci_read_config_byte(dev, PCI_CLASS_REVISION, &sis_priv->host_bridge_rev); + sis_priv->host_bridge_rev = dev->revision; pci_dev_put(dev); } diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 7d85a38377a1..2a91868788f7 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -4983,7 +4983,7 @@ static int sky2_suspend(struct device *dev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int sky2_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 726df611ee17..43654a3bb0ec 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -81,6 +81,7 @@ static const char version[] = #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/workqueue.h> +#include <linux/of.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -2394,6 +2395,15 @@ static int smc_drv_resume(struct device *dev) return 0; } +#ifdef CONFIG_OF +static const struct of_device_id smc91x_match[] = { + { .compatible = "smsc,lan91c94", }, + { .compatible = "smsc,lan91c111", }, + {}, +} +MODULE_DEVICE_TABLE(of, smc91x_match); +#endif + static struct dev_pm_ops smc_drv_pm_ops = { .suspend = smc_drv_suspend, .resume = smc_drv_resume, @@ -2406,6 +2416,9 @@ static struct platform_driver smc_driver = { .name = CARDNAME, .owner = THIS_MODULE, .pm = &smc_drv_pm_ops, +#ifdef CONFIG_OF + .of_match_table = smc91x_match, +#endif }, }; diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index d70bde95460b..1566259c1f27 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -791,8 +791,8 @@ 
static int smsc911x_mii_probe(struct net_device *dev) return -ENODEV; } - SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X", - phy_addr, phydev->addr, phydev->phy_id); + SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X", + phydev->addr, phydev->phy_id); ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link, 0, diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 1c5408f83937..c1a344829b54 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -320,28 +320,28 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s if (txmac_stat & MAC_TXSTAT_URUN) { netdev_err(dev, "TX MAC xmit underrun\n"); - gp->net_stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; } if (txmac_stat & MAC_TXSTAT_MPE) { netdev_err(dev, "TX MAC max packet size error\n"); - gp->net_stats.tx_errors++; + dev->stats.tx_errors++; } /* The rest are all cases of one of the 16-bit TX * counters expiring. */ if (txmac_stat & MAC_TXSTAT_NCE) - gp->net_stats.collisions += 0x10000; + dev->stats.collisions += 0x10000; if (txmac_stat & MAC_TXSTAT_ECE) { - gp->net_stats.tx_aborted_errors += 0x10000; - gp->net_stats.collisions += 0x10000; + dev->stats.tx_aborted_errors += 0x10000; + dev->stats.collisions += 0x10000; } if (txmac_stat & MAC_TXSTAT_LCE) { - gp->net_stats.tx_aborted_errors += 0x10000; - gp->net_stats.collisions += 0x10000; + dev->stats.tx_aborted_errors += 0x10000; + dev->stats.collisions += 0x10000; } /* We do not keep track of MAC_TXSTAT_FCE and @@ -469,20 +469,20 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s u32 smac = readl(gp->regs + MAC_SMACHINE); netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); - gp->net_stats.rx_over_errors++; - gp->net_stats.rx_fifo_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_fifo_errors++; ret = gem_rxmac_reset(gp); } if (rxmac_stat & MAC_RXSTAT_ACE) - gp->net_stats.rx_frame_errors += 0x10000; + dev->stats.rx_frame_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_CCE) - gp->net_stats.rx_crc_errors += 0x10000; + dev->stats.rx_crc_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_LCE) - gp->net_stats.rx_length_errors += 0x10000; + dev->stats.rx_length_errors += 0x10000; /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE * events. 
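The sungem hunks above and below retire the driver-private net_stats copy in favour of the stats block that struct net_device already carries, so error accounting writes straight into dev->stats and gem_get_stats() can simply hand that block back. A minimal sketch of the same pattern, outside the context of this patch and with placeholder names (my_count_rx_error, my_get_stats):

    #include <linux/netdevice.h>

    /* Account errors straight into the stats embedded in net_device,
     * instead of bumping counters in a driver-private copy. */
    static void my_count_rx_error(struct net_device *dev)
    {
            dev->stats.rx_errors++;
            dev->stats.rx_crc_errors++;
    }

    /* With no private copy left, ndo_get_stats just returns &dev->stats. */
    static struct net_device_stats *my_get_stats(struct net_device *dev)
    {
            return &dev->stats;
    }

Once every counter lives in dev->stats, a driver on later kernels can even drop the ndo_get_stats callback and let the networking core return dev->stats on its own.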
@@ -594,7 +594,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: no buffer for rx frame\n", gp->dev->name); - gp->net_stats.rx_dropped++; + dev->stats.rx_dropped++; } if (gem_status & GREG_STAT_RXTAGERR) { @@ -602,7 +602,7 @@ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_stat if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: corrupt rx tag framing\n", gp->dev->name); - gp->net_stats.rx_errors++; + dev->stats.rx_errors++; goto do_reset; } @@ -684,7 +684,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st break; } gp->tx_skbs[entry] = NULL; - gp->net_stats.tx_bytes += skb->len; + dev->stats.tx_bytes += skb->len; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { txd = &gp->init_block->txd[entry]; @@ -696,7 +696,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st entry = NEXT_TX(entry); } - gp->net_stats.tx_packets++; + dev->stats.tx_packets++; dev_kfree_skb_irq(skb); } gp->tx_old = entry; @@ -738,6 +738,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit) static int gem_rx(struct gem *gp, int work_to_do) { + struct net_device *dev = gp->dev; int entry, drops, work_done = 0; u32 done; __sum16 csum; @@ -782,15 +783,15 @@ static int gem_rx(struct gem *gp, int work_to_do) len = (status & RXDCTRL_BUFSZ) >> 16; if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { - gp->net_stats.rx_errors++; + dev->stats.rx_errors++; if (len < ETH_ZLEN) - gp->net_stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (len & RXDCTRL_BAD) - gp->net_stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; /* We'll just return it to GEM. */ drop_it: - gp->net_stats.rx_dropped++; + dev->stats.rx_dropped++; goto next; } @@ -843,8 +844,8 @@ static int gem_rx(struct gem *gp, int work_to_do) netif_receive_skb(skb); - gp->net_stats.rx_packets++; - gp->net_stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; next: entry = NEXT_RX(entry); @@ -2472,7 +2473,6 @@ static int gem_resume(struct pci_dev *pdev) static struct net_device_stats *gem_get_stats(struct net_device *dev) { struct gem *gp = netdev_priv(dev); - struct net_device_stats *stats = &gp->net_stats; spin_lock_irq(&gp->lock); spin_lock(&gp->tx_lock); @@ -2481,17 +2481,17 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) * so we shield against this */ if (gp->running) { - stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR); + dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); writel(0, gp->regs + MAC_FCSERR); - stats->rx_frame_errors += readl(gp->regs + MAC_AERR); + dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); writel(0, gp->regs + MAC_AERR); - stats->rx_length_errors += readl(gp->regs + MAC_LERR); + dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); writel(0, gp->regs + MAC_LERR); - stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL); - stats->collisions += + dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); + dev->stats.collisions += (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); writel(0, gp->regs + MAC_ECOLL); @@ -2501,7 +2501,7 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev) spin_unlock(&gp->tx_lock); spin_unlock_irq(&gp->lock); - return &gp->net_stats; + return &dev->stats; } static int gem_set_mac_address(struct net_device *dev, void *addr) diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index 19905460def6..ede017872367 
100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h @@ -994,7 +994,6 @@ struct gem { u32 status; struct napi_struct napi; - struct net_device_stats net_stats; int tx_fifo_sz; int rx_fifo_sz; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 06c0e5033656..ebec88882c3b 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2010 Broadcom Corporation. + * Copyright (C) 2005-2011 Broadcom Corporation. * * Firmware is: * Derived from proprietary unpublished source code, @@ -64,10 +64,10 @@ #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 116 +#define TG3_MIN_NUM 117 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "December 3, 2010" +#define DRV_MODULE_RELDATE "January 25, 2011" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -1776,9 +1776,29 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up) tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val); - if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || - val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + switch (val) { + case TG3_CL45_D7_EEERES_STAT_LP_1000T: + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_5719: + case ASIC_REV_57765: + /* Enable SM_DSP clock and tx 6dB coding. */ + val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | + MII_TG3_AUXCTL_ACTL_SMDSP_ENA | + MII_TG3_AUXCTL_ACTL_TX_6DB; + tg3_writephy(tp, MII_TG3_AUX_CTRL, val); + + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000); + + /* Turn off SM_DSP clock. */ + val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | + MII_TG3_AUXCTL_ACTL_TX_6DB; + tg3_writephy(tp, MII_TG3_AUX_CTRL, val); + } + /* Fallthrough */ + case TG3_CL45_D7_EEERES_STAT_LP_100TX: tp->setlpicnt = 2; + } } if (!tp->setlpicnt) { @@ -2100,7 +2120,7 @@ out: static void tg3_frob_aux_power(struct tg3 *tp) { - struct tg3 *tp_peer = tp; + bool need_vaux = false; /* The GPIOs do something completely different on 57765. */ if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 || @@ -2108,23 +2128,32 @@ static void tg3_frob_aux_power(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) return; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) && + tp->pdev_peer != tp->pdev) { struct net_device *dev_peer; dev_peer = pci_get_drvdata(tp->pdev_peer); + /* remove_one() may have been run on the peer. 
*/ - if (!dev_peer) - tp_peer = tp; - else - tp_peer = netdev_priv(dev_peer); + if (dev_peer) { + struct tg3 *tp_peer = netdev_priv(dev_peer); + + if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) + return; + + if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) || + (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF)) + need_vaux = true; + } } - if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || - (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 || - (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || - (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) { + if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) || + (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) + need_vaux = true; + + if (need_vaux) { if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | @@ -2154,10 +2183,6 @@ static void tg3_frob_aux_power(struct tg3 *tp) u32 no_gpio2; u32 grc_local_ctrl = 0; - if (tp_peer != tp && - (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) - return; - /* Workaround to prevent overdrawing Amps. */ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { @@ -2196,10 +2221,6 @@ static void tg3_frob_aux_power(struct tg3 *tp) } else { if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { - if (tp_peer != tp && - (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0) - return; - tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl | (GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1), 100); @@ -2968,11 +2989,19 @@ static void tg3_phy_copper_begin(struct tg3 *tp) MII_TG3_AUXCTL_ACTL_TX_6DB; tg3_writephy(tp, MII_TG3_AUX_CTRL, val); - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) && - !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) - tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, - val | MII_TG3_DSP_CH34TP2_HIBW01); + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + /* Fall through */ + case ASIC_REV_5719: + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + } val = 0; if (tp->link_config.autoneg == AUTONEG_ENABLE) { @@ -7801,7 +7830,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) TG3_CPMU_DBTMR1_LNKIDLE_2047US); tw32_f(TG3_CPMU_EEE_DBTMR2, - TG3_CPMU_DBTMR1_APE_TX_2047US | + TG3_CPMU_DBTMR2_APE_TX_2047US | TG3_CPMU_DBTMR2_TXIDXEQ_2047US); } @@ -8075,8 +8104,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) /* Program the jumbo buffer descriptor ring control * blocks on those devices that have them. */ - if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && - !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && + !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) { /* Setup replenish threshold. 
*/ tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8); @@ -8163,10 +8193,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) RDMAC_MODE_MBUF_RBD_CRPT_ENAB | RDMAC_MODE_MBUF_SBD_CRPT_ENAB; - /* If statement applies to 5705 and 5750 PCI devices only */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE && GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128; @@ -8194,8 +8222,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { val = tr32(TG3_RDMA_RSRVCTRL_REG); if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { - val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK; - val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B; + val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK | + TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK | + TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK); + val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B | + TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K | + TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K; } tw32(TG3_RDMA_RSRVCTRL_REG, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); @@ -8317,7 +8349,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); udelay(100); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) { + if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) && + tp->irq_cnt > 1) { val = tr32(MSGINT_MODE); val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE; tw32(MSGINT_MODE, val); @@ -8334,17 +8367,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB | WDMAC_MODE_LNGREAD_ENAB); - /* If statement applies to 5705 and 5750 PCI devices only */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && - tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) { + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 && + tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) { if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 || tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) { /* nothing */ } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) && - !(tp->tg3_flags2 & TG3_FLG2_IS_5788) && - !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { + !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) { val |= WDMAC_MODE_RX_ACCEL; } } @@ -9057,7 +9087,8 @@ static void tg3_ints_init(struct tg3 *tp) if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) { u32 msi_mode = tr32(MSGINT_MODE); - if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) + if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) && + tp->irq_cnt > 1) msi_mode |= MSGINT_MODE_MULTIVEC_EN; tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE); } @@ -10452,16 +10483,53 @@ static int tg3_test_nvram(struct tg3 *tp) goto out; } + err = -EIO; + /* Bootstrap checksum at offset 0x10 */ csum = calc_crc((unsigned char *) buf, 0x10); - if (csum != be32_to_cpu(buf[0x10/4])) + if (csum != le32_to_cpu(buf[0x10/4])) goto out; /* Manufacturing block starts at offset 0x74, checksum at 0xfc */ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88); - if (csum != be32_to_cpu(buf[0xfc/4])) + if (csum != le32_to_cpu(buf[0xfc/4])) goto out; + for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) { + /* The data is in little-endian format in NVRAM. + * Use the big-endian read routines to preserve + * the byte order as it exists in NVRAM. 
+ */ + if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4])) + goto out; + } + + i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN, + PCI_VPD_LRDT_RO_DATA); + if (i > 0) { + j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); + if (j < 0) + goto out; + + if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN) + goto out; + + i += PCI_VPD_LRDT_TAG_SIZE; + j = pci_vpd_find_info_keyword((u8 *)buf, i, j, + PCI_VPD_RO_KEYWORD_CHKSUM); + if (j > 0) { + u8 csum8 = 0; + + j += PCI_VPD_INFO_FLD_HDR_SIZE; + + for (i = 0; i <= j; i++) + csum8 += ((u8 *)buf)[i]; + + if (csum8) + goto out; + } + } + err = 0; out: @@ -10833,13 +10901,16 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) if (loopback_mode == TG3_MAC_LOOPBACK) { /* HW errata - mac loopback fails in some cases on 5780. * Normal traffic and PHY loopback are not affected by - * errata. + * errata. Also, the MAC loopback test is deprecated for + * all newer ASIC revisions. */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || + (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) return 0; - mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) | - MAC_MODE_PORT_INT_LPBACK; + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + mac_mode |= MAC_MODE_PORT_INT_LPBACK; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) mac_mode |= MAC_MODE_LINK_POLARITY; if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) @@ -10861,7 +10932,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) tg3_writephy(tp, MII_BMCR, val); udelay(40); - mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); if (tp->phy_flags & TG3_PHYFLG_IS_FET) { tg3_writephy(tp, MII_TG3_FET_PTEST, MII_TG3_FET_PTEST_FRC_TX_LINK | @@ -10889,6 +10961,13 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode) MII_TG3_EXT_CTRL_LNK3_LED_MODE); } tw32(MAC_MODE, mac_mode); + + /* Wait for link */ + for (i = 0; i < 100; i++) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + break; + mdelay(1); + } } else { return -EINVAL; } @@ -10995,14 +11074,19 @@ out: static int tg3_test_loopback(struct tg3 *tp) { int err = 0; - u32 cpmuctrl = 0; + u32 eee_cap, cpmuctrl = 0; if (!netif_running(tp->dev)) return TG3_LOOPBACK_FAILED; + eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP; + tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP; + err = tg3_reset_hw(tp, 1); - if (err) - return TG3_LOOPBACK_FAILED; + if (err) { + err = TG3_LOOPBACK_FAILED; + goto done; + } /* Turn off gphy autopowerdown. */ if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) @@ -11022,8 +11106,10 @@ static int tg3_test_loopback(struct tg3 *tp) udelay(10); } - if (status != CPMU_MUTEX_GNT_DRIVER) - return TG3_LOOPBACK_FAILED; + if (status != CPMU_MUTEX_GNT_DRIVER) { + err = TG3_LOOPBACK_FAILED; + goto done; + } /* Turn off link-based power management. 
*/ cpmuctrl = tr32(TG3_CPMU_CTRL); @@ -11052,6 +11138,9 @@ static int tg3_test_loopback(struct tg3 *tp) if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD) tg3_phy_toggle_apd(tp, true); +done: + tp->phy_flags |= eee_cap; + return err; } @@ -12407,9 +12496,11 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp) tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN; } done: - device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP); - device_set_wakeup_enable(&tp->pdev->dev, + if (tp->tg3_flags & TG3_FLAG_WOL_CAP) + device_set_wakeup_enable(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE); + else + device_set_wakeup_capable(&tp->pdev->dev, false); } static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd) @@ -12461,12 +12552,45 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp) return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16); } +static void __devinit tg3_phy_init_link_config(struct tg3 *tp) +{ + u32 adv = ADVERTISED_Autoneg | + ADVERTISED_Pause; + + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) + adv |= ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; + + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_TP; + else + adv |= ADVERTISED_FIBRE; + + tp->link_config.advertising = adv; + tp->link_config.speed = SPEED_INVALID; + tp->link_config.duplex = DUPLEX_INVALID; + tp->link_config.autoneg = AUTONEG_ENABLE; + tp->link_config.active_speed = SPEED_INVALID; + tp->link_config.active_duplex = DUPLEX_INVALID; + tp->link_config.orig_speed = SPEED_INVALID; + tp->link_config.orig_duplex = DUPLEX_INVALID; + tp->link_config.orig_autoneg = AUTONEG_INVALID; +} + static int __devinit tg3_phy_probe(struct tg3 *tp) { u32 hw_phy_id_1, hw_phy_id_2; u32 hw_phy_id, hw_phy_id_masked; int err; + /* flow control autonegotiation is default behavior */ + tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; + tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; + if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) return tg3_phy_init(tp); @@ -12528,6 +12652,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp) tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))) tp->phy_flags |= TG3_PHYFLG_EEE_CAP; + tg3_phy_init_link_config(tp); + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) { @@ -12583,17 +12709,6 @@ skip_phy_reset: err = tg3_init_5401phy_dsp(tp); } - if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) - tp->link_config.advertising = - (ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | - ADVERTISED_FIBRE); - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - tp->link_config.advertising &= - ~(ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); - return err; } @@ -13020,7 +13135,7 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) return 512; } -DEFINE_PCI_DEVICE_TABLE(write_reorder_chipsets) = { +static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, @@ -13262,7 +13377,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) } /* Determine TSO capabilities */ - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + ; /* Do nothing. HW bug. 
*/ + else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3; else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) @@ -13313,7 +13430,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG; } - if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) + if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719) tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || @@ -13331,42 +13449,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; tp->pcie_readrq = 4096; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) { - u16 word; - - pci_read_config_word(tp->pdev, - tp->pcie_cap + PCI_EXP_LNKSTA, - &word); - switch (word & PCI_EXP_LNKSTA_CLS) { - case PCI_EXP_LNKSTA_CLS_2_5GB: - word &= PCI_EXP_LNKSTA_NLW; - word >>= PCI_EXP_LNKSTA_NLW_SHIFT; - switch (word) { - case 2: - tp->pcie_readrq = 2048; - break; - case 4: - tp->pcie_readrq = 1024; - break; - } - break; - - case PCI_EXP_LNKSTA_CLS_5_0GB: - word &= PCI_EXP_LNKSTA_NLW; - word >>= PCI_EXP_LNKSTA_NLW_SHIFT; - switch (word) { - case 1: - tp->pcie_readrq = 2048; - break; - case 2: - tp->pcie_readrq = 1024; - break; - case 4: - tp->pcie_readrq = 512; - break; - } - } - } + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tp->pcie_readrq = 2048; pcie_set_readrq(tp->pdev, tp->pcie_readrq); @@ -13405,7 +13489,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) * every mailbox register write to force the writes to be * posted to the chip in order. */ - if (pci_dev_present(write_reorder_chipsets) && + if (pci_dev_present(tg3_write_reorder_chipsets) && !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER; @@ -14161,7 +14245,7 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm #define TEST_BUFFER_SIZE 0x2000 -DEFINE_PCI_DEVICE_TABLE(dma_wait_state_chipsets) = { +static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = { { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) }, { }, }; @@ -14340,7 +14424,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp) * now look for chipsets that are known to expose the * DMA bug without failing the test. 
*/ - if (pci_dev_present(dma_wait_state_chipsets)) { + if (pci_dev_present(tg3_dma_wait_state_chipsets)) { tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; } else { @@ -14357,23 +14441,6 @@ out_nofree: return ret; } -static void __devinit tg3_init_link_config(struct tg3 *tp) -{ - tp->link_config.advertising = - (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | ADVERTISED_MII); - tp->link_config.speed = SPEED_INVALID; - tp->link_config.duplex = DUPLEX_INVALID; - tp->link_config.autoneg = AUTONEG_ENABLE; - tp->link_config.active_speed = SPEED_INVALID; - tp->link_config.active_duplex = DUPLEX_INVALID; - tp->link_config.orig_speed = SPEED_INVALID; - tp->link_config.orig_duplex = DUPLEX_INVALID; - tp->link_config.orig_autoneg = AUTONEG_INVALID; -} - static void __devinit tg3_init_bufmgr_config(struct tg3 *tp) { if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) { @@ -14677,8 +14744,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, goto err_out_free_dev; } - tg3_init_link_config(tp); - tp->rx_pending = TG3_DEF_RX_RING_PENDING; tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; @@ -14826,10 +14891,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, goto err_out_apeunmap; } - /* flow control autonegotiation is default behavior */ - tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; - tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; - intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW; rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW; sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW; diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index f528243e1a4f..73884b69b749 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -4,7 +4,7 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2010 Broadcom Corporation. + * Copyright (C) 2007-2011 Broadcom Corporation. 
*/ #ifndef _T3_H @@ -141,6 +141,7 @@ #define CHIPREV_ID_57780_A1 0x57780001 #define CHIPREV_ID_5717_A0 0x05717000 #define CHIPREV_ID_57765_A0 0x57785000 +#define CHIPREV_ID_5719_A0 0x05719000 #define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) #define ASIC_REV_5700 0x07 #define ASIC_REV_5701 0x00 @@ -1105,7 +1106,7 @@ #define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000 #define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff #define TG3_CPMU_EEE_DBTMR2 0x000036b8 -#define TG3_CPMU_DBTMR1_APE_TX_2047US 0x07ff0000 +#define TG3_CPMU_DBTMR2_APE_TX_2047US 0x07ff0000 #define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff #define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc #define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 @@ -1333,6 +1334,10 @@ #define TG3_RDMA_RSRVCTRL_REG 0x00004900 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 +#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K 0x00000c00 +#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK 0x00000ff0 +#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K 0x000c0000 +#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK 0x000ff000 #define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000 #define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000 /* 0x4904 --> 0x4910 unused */ @@ -2108,6 +2113,10 @@ #define MII_TG3_DSP_TAP1 0x0001 #define MII_TG3_DSP_TAP1_AGCTGT_DFLT 0x0007 +#define MII_TG3_DSP_TAP26 0x001a +#define MII_TG3_DSP_TAP26_ALNOKO 0x0001 +#define MII_TG3_DSP_TAP26_RMRXSTO 0x0002 +#define MII_TG3_DSP_TAP26_OPCSINPT 0x0004 #define MII_TG3_DSP_AADJ1CH0 0x001f #define MII_TG3_DSP_CH34TP2 0x4022 #define MII_TG3_DSP_CH34TP2_HIBW01 0x0010 diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index f8e463cd8ecc..ace6404e2fac 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -25,150 +25,9 @@ * Microchip Technology, 24C01A/02A/04A Data Sheet * available in PDF format from www.microchip.com * - * Change History - * - * Tigran Aivazian <tigran@sco.com>: TLan_PciProbe() now uses - * new PCI BIOS interface. - * Alan Cox <alan@lxorguk.ukuu.org.uk>: - * Fixed the out of memory - * handling. - * - * Torben Mathiasen <torben.mathiasen@compaq.com> New Maintainer! - * - * v1.1 Dec 20, 1999 - Removed linux version checking - * Patch from Tigran Aivazian. - * - v1.1 includes Alan's SMP updates. - * - We still have problems on SMP though, - * but I'm looking into that. - * - * v1.2 Jan 02, 2000 - Hopefully fixed the SMP deadlock. - * - Removed dependency of HZ being 100. - * - We now allow higher priority timers to - * overwrite timers like TLAN_TIMER_ACTIVITY - * Patch from John Cagle <john.cagle@compaq.com>. - * - Fixed a few compiler warnings. - * - * v1.3 Feb 04, 2000 - Fixed the remaining HZ issues. - * - Removed call to pci_present(). - * - Removed SA_INTERRUPT flag from irq handler. - * - Added __init and __initdata to reduce resisdent - * code size. - * - Driver now uses module_init/module_exit. - * - Rewrote init_module and tlan_probe to - * share a lot more code. We now use tlan_probe - * with builtin and module driver. - * - Driver ported to new net API. - * - tlan.txt has been reworked to reflect current - * driver (almost) - * - Other minor stuff - * - * v1.4 Feb 10, 2000 - Updated with more changes required after Dave's - * network cleanup in 2.3.43pre7 (Tigran & myself) - * - Minor stuff. - * - * v1.5 March 22, 2000 - Fixed another timer bug that would hang the driver - * if no cable/link were present. - * - Cosmetic changes. - * - TODO: Port completely to new PCI/DMA API - * Auto-Neg fallback. - * - * v1.6 April 04, 2000 - Fixed driver support for kernel-parameters. 
Haven't - * tested it though, as the kernel support is currently - * broken (2.3.99p4p3). - * - Updated tlan.txt accordingly. - * - Adjusted minimum/maximum frame length. - * - There is now a TLAN website up at - * http://hp.sourceforge.net/ - * - * v1.7 April 07, 2000 - Started to implement custom ioctls. Driver now - * reports PHY information when used with Donald - * Beckers userspace MII diagnostics utility. - * - * v1.8 April 23, 2000 - Fixed support for forced speed/duplex settings. - * - Added link information to Auto-Neg and forced - * modes. When NIC operates with auto-neg the driver - * will report Link speed & duplex modes as well as - * link partner abilities. When forced link is used, - * the driver will report status of the established - * link. - * Please read tlan.txt for additional information. - * - Removed call to check_region(), and used - * return value of request_region() instead. - * - * v1.8a May 28, 2000 - Minor updates. - * - * v1.9 July 25, 2000 - Fixed a few remaining Full-Duplex issues. - * - Updated with timer fixes from Andrew Morton. - * - Fixed module race in TLan_Open. - * - Added routine to monitor PHY status. - * - Added activity led support for Proliant devices. - * - * v1.10 Aug 30, 2000 - Added support for EISA based tlan controllers - * like the Compaq NetFlex3/E. - * - Rewrote tlan_probe to better handle multiple - * bus probes. Probing and device setup is now - * done through TLan_Probe and TLan_init_one. Actual - * hardware probe is done with kernel API and - * TLan_EisaProbe. - * - Adjusted debug information for probing. - * - Fixed bug that would cause general debug information - * to be printed after driver removal. - * - Added transmit timeout handling. - * - Fixed OOM return values in tlan_probe. - * - Fixed possible mem leak in tlan_exit - * (now tlan_remove_one). - * - Fixed timer bug in TLan_phyMonitor. - * - This driver version is alpha quality, please - * send me any bug issues you may encounter. - * - * v1.11 Aug 31, 2000 - Do not try to register irq 0 if no irq line was - * set for EISA cards. - * - Added support for NetFlex3/E with nibble-rate - * 10Base-T PHY. This is untestet as I haven't got - * one of these cards. - * - Fixed timer being added twice. - * - Disabled PhyMonitoring by default as this is - * work in progress. Define MONITOR to enable it. - * - Now we don't display link info with PHYs that - * doesn't support it (level1). - * - Incresed tx_timeout beacuse of auto-neg. - * - Adjusted timers for forced speeds. - * - * v1.12 Oct 12, 2000 - Minor fixes (memleak, init, etc.) - * - * v1.13 Nov 28, 2000 - Stop flooding console with auto-neg issues - * when link can't be established. - * - Added the bbuf option as a kernel parameter. - * - Fixed ioaddr probe bug. - * - Fixed stupid deadlock with MII interrupts. - * - Added support for speed/duplex selection with - * multiple nics. - * - Added partly fix for TX Channel lockup with - * TLAN v1.0 silicon. This needs to be investigated - * further. - * - * v1.14 Dec 16, 2000 - Added support for servicing multiple frames per. - * interrupt. Thanks goes to - * Adam Keys <adam@ti.com> - * Denis Beaudoin <dbeaudoin@ti.com> - * for providing the patch. - * - Fixed auto-neg output when using multiple - * adapters. - * - Converted to use new taskq interface. - * - * v1.14a Jan 6, 2001 - Minor adjustments (spinlocks, etc.) - * - * Samuel Chessman <chessman@tux.org> New Maintainer! 
- * - * v1.15 Apr 4, 2002 - Correct operation when aui=1 to be - * 10T half duplex no loopback - * Thanks to Gunnar Eikman - * - * Sakari Ailus <sakari.ailus@iki.fi>: - * - * v1.15a Dec 15 2008 - Remove bbuf support, it doesn't work anyway. - * - *******************************************************************************/ + ******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/init.h> @@ -185,13 +44,11 @@ #include "tlan.h" -typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 ); - /* For removing EISA devices */ -static struct net_device *TLan_Eisa_Devices; +static struct net_device *tlan_eisa_devices; -static int TLanDevicesInstalled; +static int tlan_devices_installed; /* Set speed, duplex and aui settings */ static int aui[MAX_TLAN_BOARDS]; @@ -202,8 +59,9 @@ module_param_array(aui, int, NULL, 0); module_param_array(duplex, int, NULL, 0); module_param_array(speed, int, NULL, 0); MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); -MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); -MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)"); +MODULE_PARM_DESC(duplex, + "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); +MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)"); MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); @@ -218,139 +76,144 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); -static const char TLanSignature[] = "TLAN"; -static const char tlan_banner[] = "ThunderLAN driver v1.15a\n"; +static const char tlan_signature[] = "TLAN"; +static const char tlan_banner[] = "ThunderLAN driver v1.17\n"; static int tlan_have_pci; static int tlan_have_eisa; -static const char *media[] = { - "10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ", - "100baseTx-FD", "100baseT4", NULL +static const char * const media[] = { + "10BaseT-HD", "10BaseT-FD", "100baseTx-HD", + "100BaseTx-FD", "100BaseT4", NULL }; static struct board { - const char *deviceLabel; - u32 flags; - u16 addrOfs; + const char *device_label; + u32 flags; + u16 addr_ofs; } board_info[] = { { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, - { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, + { "Compaq Netelligent 10/100 TX PCI UTP", + TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, { "Compaq NetFlex-3/P", TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, { "Compaq Netelligent Integrated 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, - { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, - { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq Netelligent Dual 10/100 TX PCI UTP", + TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq Netelligent 10/100 TX Embedded UTP", + TLAN_ADAPTER_NONE, 0x83 }, { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, - { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 }, - { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, + { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 }, + { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 }, { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, - { "Compaq Netelligent 10 T/2 PCI 
UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, + { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 }, { "Compaq NetFlex-3/E", - TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ + TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, - { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ + { "Compaq NetFlex-3/E", + TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ }; static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = { { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, { 0,} }; MODULE_DEVICE_TABLE(pci, tlan_pci_tbl); -static void TLan_EisaProbe( void ); -static void TLan_Eisa_Cleanup( void ); -static int TLan_Init( struct net_device * ); -static int TLan_Open( struct net_device *dev ); -static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *); -static irqreturn_t TLan_HandleInterrupt( int, void *); -static int TLan_Close( struct net_device *); -static struct net_device_stats *TLan_GetStats( struct net_device *); -static void TLan_SetMulticastList( struct net_device *); -static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); -static int TLan_probe1( struct pci_dev *pdev, long ioaddr, - int irq, int rev, const struct pci_device_id *ent); -static void TLan_tx_timeout( struct net_device *dev); -static void TLan_tx_timeout_work(struct work_struct *work); -static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); - -static u32 TLan_HandleTxEOF( struct net_device *, u16 ); -static u32 TLan_HandleStatOverflow( struct net_device *, u16 ); -static u32 TLan_HandleRxEOF( struct net_device *, u16 ); -static u32 TLan_HandleDummy( struct net_device *, u16 ); -static u32 TLan_HandleTxEOC( struct net_device *, u16 ); -static u32 TLan_HandleStatusCheck( struct net_device *, u16 ); -static u32 TLan_HandleRxEOC( struct net_device *, u16 ); - -static void TLan_Timer( 
unsigned long ); - -static void TLan_ResetLists( struct net_device * ); -static void TLan_FreeLists( struct net_device * ); -static void TLan_PrintDio( u16 ); -static void TLan_PrintList( TLanList *, char *, int ); -static void TLan_ReadAndClearStats( struct net_device *, int ); -static void TLan_ResetAdapter( struct net_device * ); -static void TLan_FinishReset( struct net_device * ); -static void TLan_SetMac( struct net_device *, int areg, char *mac ); - -static void TLan_PhyPrint( struct net_device * ); -static void TLan_PhyDetect( struct net_device * ); -static void TLan_PhyPowerDown( struct net_device * ); -static void TLan_PhyPowerUp( struct net_device * ); -static void TLan_PhyReset( struct net_device * ); -static void TLan_PhyStartLink( struct net_device * ); -static void TLan_PhyFinishAutoNeg( struct net_device * ); +static void tlan_eisa_probe(void); +static void tlan_eisa_cleanup(void); +static int tlan_init(struct net_device *); +static int tlan_open(struct net_device *dev); +static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *); +static irqreturn_t tlan_handle_interrupt(int, void *); +static int tlan_close(struct net_device *); +static struct net_device_stats *tlan_get_stats(struct net_device *); +static void tlan_set_multicast_list(struct net_device *); +static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int tlan_probe1(struct pci_dev *pdev, long ioaddr, + int irq, int rev, const struct pci_device_id *ent); +static void tlan_tx_timeout(struct net_device *dev); +static void tlan_tx_timeout_work(struct work_struct *work); +static int tlan_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent); + +static u32 tlan_handle_tx_eof(struct net_device *, u16); +static u32 tlan_handle_stat_overflow(struct net_device *, u16); +static u32 tlan_handle_rx_eof(struct net_device *, u16); +static u32 tlan_handle_dummy(struct net_device *, u16); +static u32 tlan_handle_tx_eoc(struct net_device *, u16); +static u32 tlan_handle_status_check(struct net_device *, u16); +static u32 tlan_handle_rx_eoc(struct net_device *, u16); + +static void tlan_timer(unsigned long); + +static void tlan_reset_lists(struct net_device *); +static void tlan_free_lists(struct net_device *); +static void tlan_print_dio(u16); +static void tlan_print_list(struct tlan_list *, char *, int); +static void tlan_read_and_clear_stats(struct net_device *, int); +static void tlan_reset_adapter(struct net_device *); +static void tlan_finish_reset(struct net_device *); +static void tlan_set_mac(struct net_device *, int areg, char *mac); + +static void tlan_phy_print(struct net_device *); +static void tlan_phy_detect(struct net_device *); +static void tlan_phy_power_down(struct net_device *); +static void tlan_phy_power_up(struct net_device *); +static void tlan_phy_reset(struct net_device *); +static void tlan_phy_start_link(struct net_device *); +static void tlan_phy_finish_auto_neg(struct net_device *); #ifdef MONITOR -static void TLan_PhyMonitor( struct net_device * ); +static void tlan_phy_monitor(struct net_device *); #endif /* -static int TLan_PhyNop( struct net_device * ); -static int TLan_PhyInternalCheck( struct net_device * ); -static int TLan_PhyInternalService( struct net_device * ); -static int TLan_PhyDp83840aCheck( struct net_device * ); + static int tlan_phy_nop(struct net_device *); + static int tlan_phy_internal_check(struct net_device *); + static int tlan_phy_internal_service(struct net_device *); + static int tlan_phy_dp83840a_check(struct net_device 
*); */ -static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * ); -static void TLan_MiiSendData( u16, u32, unsigned ); -static void TLan_MiiSync( u16 ); -static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 ); +static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *); +static void tlan_mii_send_data(u16, u32, unsigned); +static void tlan_mii_sync(u16); +static void tlan_mii_write_reg(struct net_device *, u16, u16, u16); -static void TLan_EeSendStart( u16 ); -static int TLan_EeSendByte( u16, u8, int ); -static void TLan_EeReceiveByte( u16, u8 *, int ); -static int TLan_EeReadByte( struct net_device *, u8, u8 * ); +static void tlan_ee_send_start(u16); +static int tlan_ee_send_byte(u16, u8, int); +static void tlan_ee_receive_byte(u16, u8 *, int); +static int tlan_ee_read_byte(struct net_device *, u8, u8 *); static inline void -TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) +tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb) { unsigned long addr = (unsigned long)skb; tag->buffer[9].address = addr; @@ -358,7 +221,7 @@ TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb) } static inline struct sk_buff * -TLan_GetSKB( const struct tlan_list_tag *tag) +tlan_get_skb(const struct tlan_list *tag) { unsigned long addr; @@ -367,50 +230,50 @@ TLan_GetSKB( const struct tlan_list_tag *tag) return (struct sk_buff *) addr; } - -static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = { +static u32 +(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = { NULL, - TLan_HandleTxEOF, - TLan_HandleStatOverflow, - TLan_HandleRxEOF, - TLan_HandleDummy, - TLan_HandleTxEOC, - TLan_HandleStatusCheck, - TLan_HandleRxEOC + tlan_handle_tx_eof, + tlan_handle_stat_overflow, + tlan_handle_rx_eof, + tlan_handle_dummy, + tlan_handle_tx_eoc, + tlan_handle_status_check, + tlan_handle_rx_eoc }; static inline void -TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type ) +tlan_set_timer(struct net_device *dev, u32 ticks, u32 type) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); unsigned long flags = 0; if (!in_irq()) spin_lock_irqsave(&priv->lock, flags); - if ( priv->timer.function != NULL && - priv->timerType != TLAN_TIMER_ACTIVITY ) { + if (priv->timer.function != NULL && + priv->timer_type != TLAN_TIMER_ACTIVITY) { if (!in_irq()) spin_unlock_irqrestore(&priv->lock, flags); return; } - priv->timer.function = TLan_Timer; + priv->timer.function = tlan_timer; if (!in_irq()) spin_unlock_irqrestore(&priv->lock, flags); priv->timer.data = (unsigned long) dev; - priv->timerSetAt = jiffies; - priv->timerType = type; + priv->timer_set_at = jiffies; + priv->timer_type = type; mod_timer(&priv->timer, jiffies + ticks); -} /* TLan_SetTimer */ +} /***************************************************************************** ****************************************************************************** - ThunderLAN Driver Primary Functions +ThunderLAN driver primary functions - These functions are more or less common to all Linux network drivers. +these functions are more or less common to all linux network drivers. 
****************************************************************************** *****************************************************************************/ @@ -419,56 +282,124 @@ TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type ) - /*************************************************************** - * tlan_remove_one - * - * Returns: - * Nothing - * Parms: - * None - * - * Goes through the TLanDevices list and frees the device - * structs and memory associated with each device (lists - * and buffers). It also ureserves the IO port regions - * associated with this device. - * - **************************************************************/ +/*************************************************************** + * tlan_remove_one + * + * Returns: + * Nothing + * Parms: + * None + * + * Goes through the TLanDevices list and frees the device + * structs and memory associated with each device (lists + * and buffers). It also ureserves the IO port regions + * associated with this device. + * + **************************************************************/ -static void __devexit tlan_remove_one( struct pci_dev *pdev) +static void __devexit tlan_remove_one(struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata( pdev ); - TLanPrivateInfo *priv = netdev_priv(dev); + struct net_device *dev = pci_get_drvdata(pdev); + struct tlan_priv *priv = netdev_priv(dev); - unregister_netdev( dev ); + unregister_netdev(dev); - if ( priv->dmaStorage ) { - pci_free_consistent(priv->pciDev, - priv->dmaSize, priv->dmaStorage, - priv->dmaStorageDMA ); + if (priv->dma_storage) { + pci_free_consistent(priv->pci_dev, + priv->dma_size, priv->dma_storage, + priv->dma_storage_dma); } #ifdef CONFIG_PCI pci_release_regions(pdev); #endif - free_netdev( dev ); + free_netdev(dev); - pci_set_drvdata( pdev, NULL ); + pci_set_drvdata(pdev, NULL); } +static void tlan_start(struct net_device *dev) +{ + tlan_reset_lists(dev); + /* NOTE: It might not be necessary to read the stats before a + reset if you don't care what the values are. 
+ */ + tlan_read_and_clear_stats(dev, TLAN_IGNORE); + tlan_reset_adapter(dev); + netif_wake_queue(dev); +} + +static void tlan_stop(struct net_device *dev) +{ + struct tlan_priv *priv = netdev_priv(dev); + + tlan_read_and_clear_stats(dev, TLAN_RECORD); + outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); + /* Reset and power down phy */ + tlan_reset_adapter(dev); + if (priv->timer.function != NULL) { + del_timer_sync(&priv->timer); + priv->timer.function = NULL; + } +} + +#ifdef CONFIG_PM + +static int tlan_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (netif_running(dev)) + tlan_stop(dev); + + netif_device_detach(dev); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int tlan_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_enable_wake(pdev, 0, 0); + netif_device_attach(dev); + + if (netif_running(dev)) + tlan_start(dev); + + return 0; +} + +#else /* CONFIG_PM */ + +#define tlan_suspend NULL +#define tlan_resume NULL + +#endif /* CONFIG_PM */ + + static struct pci_driver tlan_driver = { .name = "tlan", .id_table = tlan_pci_tbl, .probe = tlan_init_one, .remove = __devexit_p(tlan_remove_one), + .suspend = tlan_suspend, + .resume = tlan_resume, }; static int __init tlan_probe(void) { int rc = -ENODEV; - printk(KERN_INFO "%s", tlan_banner); + pr_info("%s", tlan_banner); TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); @@ -477,18 +408,18 @@ static int __init tlan_probe(void) rc = pci_register_driver(&tlan_driver); if (rc != 0) { - printk(KERN_ERR "TLAN: Could not register pci driver.\n"); + pr_err("Could not register pci driver\n"); goto err_out_pci_free; } TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); - TLan_EisaProbe(); + tlan_eisa_probe(); - printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d EISA: %d\n", - TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s", - tlan_have_pci, tlan_have_eisa); + pr_info("%d device%s installed, PCI: %d EISA: %d\n", + tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s", + tlan_have_pci, tlan_have_eisa); - if (TLanDevicesInstalled == 0) { + if (tlan_devices_installed == 0) { rc = -ENODEV; goto err_out_pci_unreg; } @@ -501,39 +432,39 @@ err_out_pci_free: } -static int __devinit tlan_init_one( struct pci_dev *pdev, - const struct pci_device_id *ent) +static int __devinit tlan_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) { - return TLan_probe1( pdev, -1, -1, 0, ent); + return tlan_probe1(pdev, -1, -1, 0, ent); } /* - *************************************************************** - * tlan_probe1 - * - * Returns: - * 0 on success, error code on error - * Parms: - * none - * - * The name is lower case to fit in with all the rest of - * the netcard_probe names. This function looks for - * another TLan based adapter, setting it up with the - * allocated device struct if one is found. - * tlan_probe has been ported to the new net API and - * now allocates its own device structure. This function - * is also used by modules. 
- * - **************************************************************/ - -static int __devinit TLan_probe1(struct pci_dev *pdev, +*************************************************************** +* tlan_probe1 +* +* Returns: +* 0 on success, error code on error +* Parms: +* none +* +* The name is lower case to fit in with all the rest of +* the netcard_probe names. This function looks for +* another TLan based adapter, setting it up with the +* allocated device struct if one is found. +* tlan_probe has been ported to the new net API and +* now allocates its own device structure. This function +* is also used by modules. +* +**************************************************************/ + +static int __devinit tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev, - const struct pci_device_id *ent ) + const struct pci_device_id *ent) { struct net_device *dev; - TLanPrivateInfo *priv; + struct tlan_priv *priv; u16 device_id; int reg, rc = -ENODEV; @@ -543,17 +474,17 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, if (rc) return rc; - rc = pci_request_regions(pdev, TLanSignature); + rc = pci_request_regions(pdev, tlan_signature); if (rc) { - printk(KERN_ERR "TLAN: Could not reserve IO regions\n"); + pr_err("Could not reserve IO regions\n"); goto err_out; } } #endif /* CONFIG_PCI */ - dev = alloc_etherdev(sizeof(TLanPrivateInfo)); + dev = alloc_etherdev(sizeof(struct tlan_priv)); if (dev == NULL) { - printk(KERN_ERR "TLAN: Could not allocate memory for device.\n"); + pr_err("Could not allocate memory for device\n"); rc = -ENOMEM; goto err_out_regions; } @@ -561,38 +492,39 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, priv = netdev_priv(dev); - priv->pciDev = pdev; + priv->pci_dev = pdev; priv->dev = dev; /* Is this a PCI device? */ if (pdev) { - u32 pci_io_base = 0; + u32 pci_io_base = 0; priv->adapter = &board_info[ent->driver_data]; rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { - printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n"); + pr_err("No suitable PCI mapping available\n"); goto err_out_free_dev; } - for ( reg= 0; reg <= 5; reg ++ ) { + for (reg = 0; reg <= 5; reg++) { if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { pci_io_base = pci_resource_start(pdev, reg); - TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n", - pci_io_base); + TLAN_DBG(TLAN_DEBUG_GNRL, + "IO mapping is available at %x.\n", + pci_io_base); break; } } if (!pci_io_base) { - printk(KERN_ERR "TLAN: No IO mappings available\n"); + pr_err("No IO mappings available\n"); rc = -EIO; goto err_out_free_dev; } dev->base_addr = pci_io_base; dev->irq = pdev->irq; - priv->adapterRev = pdev->revision; + priv->adapter_rev = pdev->revision; pci_set_master(pdev); pci_set_drvdata(pdev, dev); @@ -602,11 +534,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, device_id = inw(ioaddr + EISA_ID2); priv->is_eisa = 1; if (device_id == 0x20F1) { - priv->adapter = &board_info[13]; /* NetFlex-3/E */ - priv->adapterRev = 23; /* TLAN 2.3 */ + priv->adapter = &board_info[13]; /* NetFlex-3/E */ + priv->adapter_rev = 23; /* TLAN 2.3 */ } else { priv->adapter = &board_info[14]; - priv->adapterRev = 10; /* TLAN 1.0 */ + priv->adapter_rev = 10; /* TLAN 1.0 */ } dev->base_addr = ioaddr; dev->irq = irq; @@ -620,11 +552,11 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 
0 : (dev->mem_start & 0x18) >> 3; - if (priv->speed == 0x1) { + if (priv->speed == 0x1) priv->speed = TLAN_SPEED_10; - } else if (priv->speed == 0x2) { + else if (priv->speed == 0x2) priv->speed = TLAN_SPEED_100; - } + debug = priv->debug = dev->mem_end; } else { priv->aui = aui[boards_found]; @@ -635,46 +567,45 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, /* This will be used when we get an adapter error from * within our irq handler */ - INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); + INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work); spin_lock_init(&priv->lock); - rc = TLan_Init(dev); + rc = tlan_init(dev); if (rc) { - printk(KERN_ERR "TLAN: Could not set up device.\n"); + pr_err("Could not set up device\n"); goto err_out_free_dev; } rc = register_netdev(dev); if (rc) { - printk(KERN_ERR "TLAN: Could not register device.\n"); + pr_err("Could not register device\n"); goto err_out_uninit; } - TLanDevicesInstalled++; + tlan_devices_installed++; boards_found++; /* pdev is NULL if this is an EISA device */ if (pdev) tlan_have_pci++; else { - priv->nextDevice = TLan_Eisa_Devices; - TLan_Eisa_Devices = dev; + priv->next_device = tlan_eisa_devices; + tlan_eisa_devices = dev; tlan_have_eisa++; } - printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n", - dev->name, - (int) dev->irq, - (int) dev->base_addr, - priv->adapter->deviceLabel, - priv->adapterRev); + netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n", + (int)dev->irq, + (int)dev->base_addr, + priv->adapter->device_label, + priv->adapter_rev); return 0; err_out_uninit: - pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, - priv->dmaStorageDMA ); + pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage, + priv->dma_storage_dma); err_out_free_dev: free_netdev(dev); err_out_regions: @@ -689,22 +620,23 @@ err_out: } -static void TLan_Eisa_Cleanup(void) +static void tlan_eisa_cleanup(void) { struct net_device *dev; - TLanPrivateInfo *priv; + struct tlan_priv *priv; - while( tlan_have_eisa ) { - dev = TLan_Eisa_Devices; + while (tlan_have_eisa) { + dev = tlan_eisa_devices; priv = netdev_priv(dev); - if (priv->dmaStorage) { - pci_free_consistent(priv->pciDev, priv->dmaSize, - priv->dmaStorage, priv->dmaStorageDMA ); + if (priv->dma_storage) { + pci_free_consistent(priv->pci_dev, priv->dma_size, + priv->dma_storage, + priv->dma_storage_dma); } - release_region( dev->base_addr, 0x10); - unregister_netdev( dev ); - TLan_Eisa_Devices = priv->nextDevice; - free_netdev( dev ); + release_region(dev->base_addr, 0x10); + unregister_netdev(dev); + tlan_eisa_devices = priv->next_device; + free_netdev(dev); tlan_have_eisa--; } } @@ -715,7 +647,7 @@ static void __exit tlan_exit(void) pci_unregister_driver(&tlan_driver); if (tlan_have_eisa) - TLan_Eisa_Cleanup(); + tlan_eisa_cleanup(); } @@ -726,24 +658,24 @@ module_exit(tlan_exit); - /************************************************************** - * TLan_EisaProbe - * - * Returns: 0 on success, 1 otherwise - * - * Parms: None - * - * - * This functions probes for EISA devices and calls - * TLan_probe1 when one is found. - * - *************************************************************/ +/************************************************************** + * tlan_eisa_probe + * + * Returns: 0 on success, 1 otherwise + * + * Parms: None + * + * + * This functions probes for EISA devices and calls + * TLan_probe1 when one is found. 
+ * + *************************************************************/ -static void __init TLan_EisaProbe (void) +static void __init tlan_eisa_probe(void) { - long ioaddr; - int rc = -ENODEV; - int irq; + long ioaddr; + int rc = -ENODEV; + int irq; u16 device_id; if (!EISA_bus) { @@ -754,15 +686,16 @@ static void __init TLan_EisaProbe (void) /* Loop through all slots of the EISA bus */ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { - TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", - (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); - TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", - (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); + TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n", + (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID)); + TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n", + (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2)); - TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", - (int) ioaddr); - if (request_region(ioaddr, 0x10, TLanSignature) == NULL) + TLAN_DBG(TLAN_DEBUG_PROBE, + "Probing for EISA adapter at IO: 0x%4x : ", + (int) ioaddr); + if (request_region(ioaddr, 0x10, tlan_signature) == NULL) goto out; if (inw(ioaddr + EISA_ID) != 0x110E) { @@ -772,326 +705,324 @@ static void __init TLan_EisaProbe (void) device_id = inw(ioaddr + EISA_ID2); if (device_id != 0x20F1 && device_id != 0x40F1) { - release_region (ioaddr, 0x10); + release_region(ioaddr, 0x10); goto out; } - if (inb(ioaddr + EISA_CR) != 0x1) { /* Check if adapter is enabled */ - release_region (ioaddr, 0x10); + /* check if adapter is enabled */ + if (inb(ioaddr + EISA_CR) != 0x1) { + release_region(ioaddr, 0x10); goto out2; } if (debug == 0x10) - printk("Found one\n"); + pr_info("Found one\n"); /* Get irq from board */ - switch (inb(ioaddr + 0xCC0)) { - case(0x10): - irq=5; - break; - case(0x20): - irq=9; - break; - case(0x40): - irq=10; - break; - case(0x80): - irq=11; - break; - default: - goto out; + switch (inb(ioaddr + 0xcc0)) { + case(0x10): + irq = 5; + break; + case(0x20): + irq = 9; + break; + case(0x40): + irq = 10; + break; + case(0x80): + irq = 11; + break; + default: + goto out; } /* Setup the newly found eisa adapter */ - rc = TLan_probe1( NULL, ioaddr, irq, - 12, NULL); + rc = tlan_probe1(NULL, ioaddr, irq, + 12, NULL); continue; - out: - if (debug == 0x10) - printk("None found\n"); - continue; +out: + if (debug == 0x10) + pr_info("None found\n"); + continue; - out2: if (debug == 0x10) - printk("Card found but it is not enabled, skipping\n"); - continue; +out2: + if (debug == 0x10) + pr_info("Card found but it is not enabled, skipping\n"); + continue; } -} /* TLan_EisaProbe */ +} #ifdef CONFIG_NET_POLL_CONTROLLER -static void TLan_Poll(struct net_device *dev) +static void tlan_poll(struct net_device *dev) { disable_irq(dev->irq); - TLan_HandleInterrupt(dev->irq, dev); + tlan_handle_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif -static const struct net_device_ops TLan_netdev_ops = { - .ndo_open = TLan_Open, - .ndo_stop = TLan_Close, - .ndo_start_xmit = TLan_StartTx, - .ndo_tx_timeout = TLan_tx_timeout, - .ndo_get_stats = TLan_GetStats, - .ndo_set_multicast_list = TLan_SetMulticastList, - .ndo_do_ioctl = TLan_ioctl, +static const struct net_device_ops tlan_netdev_ops = { + .ndo_open = tlan_open, + .ndo_stop = tlan_close, + .ndo_start_xmit = tlan_start_tx, + .ndo_tx_timeout = tlan_tx_timeout, + .ndo_get_stats = tlan_get_stats, + .ndo_set_multicast_list = tlan_set_multicast_list, + .ndo_do_ioctl = tlan_ioctl, .ndo_change_mtu = eth_change_mtu, - 
.ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = TLan_Poll, + .ndo_poll_controller = tlan_poll, #endif }; - /*************************************************************** - * TLan_Init - * - * Returns: - * 0 on success, error code otherwise. - * Parms: - * dev The structure of the device to be - * init'ed. - * - * This function completes the initialization of the - * device structure and driver. It reserves the IO - * addresses, allocates memory for the lists and bounce - * buffers, retrieves the MAC address from the eeprom - * and assignes the device's methods. - * - **************************************************************/ - -static int TLan_Init( struct net_device *dev ) +/*************************************************************** + * tlan_init + * + * Returns: + * 0 on success, error code otherwise. + * Parms: + * dev The structure of the device to be + * init'ed. + * + * This function completes the initialization of the + * device structure and driver. It reserves the IO + * addresses, allocates memory for the lists and bounce + * buffers, retrieves the MAC address from the eeprom + * and assignes the device's methods. + * + **************************************************************/ + +static int tlan_init(struct net_device *dev) { int dma_size; - int err; + int err; int i; - TLanPrivateInfo *priv; + struct tlan_priv *priv; priv = netdev_priv(dev); - dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS ) - * ( sizeof(TLanList) ); - priv->dmaStorage = pci_alloc_consistent(priv->pciDev, - dma_size, &priv->dmaStorageDMA); - priv->dmaSize = dma_size; + dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS) + * (sizeof(struct tlan_list)); + priv->dma_storage = pci_alloc_consistent(priv->pci_dev, + dma_size, + &priv->dma_storage_dma); + priv->dma_size = dma_size; - if ( priv->dmaStorage == NULL ) { - printk(KERN_ERR "TLAN: Could not allocate lists and buffers for %s.\n", - dev->name ); + if (priv->dma_storage == NULL) { + pr_err("Could not allocate lists and buffers for %s\n", + dev->name); return -ENOMEM; } - memset( priv->dmaStorage, 0, dma_size ); - priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8); - priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8); - priv->txList = priv->rxList + TLAN_NUM_RX_LISTS; - priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS; + memset(priv->dma_storage, 0, dma_size); + priv->rx_list = (struct tlan_list *) + ALIGN((unsigned long)priv->dma_storage, 8); + priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8); + priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS; + priv->tx_list_dma = + priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS; err = 0; - for ( i = 0; i < 6 ; i++ ) - err |= TLan_EeReadByte( dev, - (u8) priv->adapter->addrOfs + i, - (u8 *) &dev->dev_addr[i] ); - if ( err ) { - printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n", - dev->name, - err ); + for (i = 0; i < 6 ; i++) + err |= tlan_ee_read_byte(dev, + (u8) priv->adapter->addr_ofs + i, + (u8 *) &dev->dev_addr[i]); + if (err) { + pr_err("%s: Error reading MAC from eeprom: %d\n", + dev->name, err); } dev->addr_len = 6; netif_carrier_off(dev); /* Device methods */ - dev->netdev_ops = &TLan_netdev_ops; + dev->netdev_ops = &tlan_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; return 0; -} /* TLan_Init */ +} - /*************************************************************** - * TLan_Open - * - * Returns: - * 
0 on success, error code otherwise. - * Parms: - * dev Structure of device to be opened. - * - * This routine puts the driver and TLAN adapter in a - * state where it is ready to send and receive packets. - * It allocates the IRQ, resets and brings the adapter - * out of reset, and allows interrupts. It also delays - * the startup for autonegotiation or sends a Rx GO - * command to the adapter, as appropriate. - * - **************************************************************/ +/*************************************************************** + * tlan_open + * + * Returns: + * 0 on success, error code otherwise. + * Parms: + * dev Structure of device to be opened. + * + * This routine puts the driver and TLAN adapter in a + * state where it is ready to send and receive packets. + * It allocates the IRQ, resets and brings the adapter + * out of reset, and allows interrupts. It also delays + * the startup for autonegotiation or sends a Rx GO + * command to the adapter, as appropriate. + * + **************************************************************/ -static int TLan_Open( struct net_device *dev ) +static int tlan_open(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); int err; - priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION ); - err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED, - dev->name, dev ); + priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION); + err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED, + dev->name, dev); - if ( err ) { - pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n", - dev->name, dev->irq ); + if (err) { + netdev_err(dev, "Cannot open because IRQ %d is already in use\n", + dev->irq); return err; } init_timer(&priv->timer); - netif_start_queue(dev); - /* NOTE: It might not be necessary to read the stats before a - reset if you don't care what the values are. - */ - TLan_ResetLists( dev ); - TLan_ReadAndClearStats( dev, TLAN_IGNORE ); - TLan_ResetAdapter( dev ); + tlan_start(dev); - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", - dev->name, priv->tlanRev ); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", + dev->name, priv->tlan_rev); return 0; -} /* TLan_Open */ +} - /************************************************************** - * TLan_ioctl - * - * Returns: - * 0 on success, error code otherwise - * Params: - * dev structure of device to receive ioctl. - * - * rq ifreq structure to hold userspace data. - * - * cmd ioctl command. - * - * - *************************************************************/ +/************************************************************** + * tlan_ioctl + * + * Returns: + * 0 on success, error code otherwise + * Params: + * dev structure of device to receive ioctl. + * + * rq ifreq structure to hold userspace data. + * + * cmd ioctl command. + * + * + *************************************************************/ -static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(rq); - u32 phy = priv->phy[priv->phyNum]; + u32 phy = priv->phy[priv->phy_num]; - if (!priv->phyOnline) + if (!priv->phy_online) return -EAGAIN; - switch(cmd) { - case SIOCGMIIPHY: /* Get address of MII PHY in use. 
*/ - data->phy_id = phy; + switch (cmd) { + case SIOCGMIIPHY: /* get address of MII PHY in use. */ + data->phy_id = phy; - case SIOCGMIIREG: /* Read MII PHY register. */ - TLan_MiiReadReg(dev, data->phy_id & 0x1f, - data->reg_num & 0x1f, &data->val_out); - return 0; + case SIOCGMIIREG: /* read MII PHY register. */ + tlan_mii_read_reg(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, &data->val_out); + return 0; - case SIOCSMIIREG: /* Write MII PHY register. */ - TLan_MiiWriteReg(dev, data->phy_id & 0x1f, - data->reg_num & 0x1f, data->val_in); - return 0; - default: - return -EOPNOTSUPP; + case SIOCSMIIREG: /* write MII PHY register. */ + tlan_mii_write_reg(dev, data->phy_id & 0x1f, + data->reg_num & 0x1f, data->val_in); + return 0; + default: + return -EOPNOTSUPP; } -} /* tlan_ioctl */ +} - /*************************************************************** - * TLan_tx_timeout - * - * Returns: nothing - * - * Params: - * dev structure of device which timed out - * during transmit. - * - **************************************************************/ +/*************************************************************** + * tlan_tx_timeout + * + * Returns: nothing + * + * Params: + * dev structure of device which timed out + * during transmit. + * + **************************************************************/ -static void TLan_tx_timeout(struct net_device *dev) +static void tlan_tx_timeout(struct net_device *dev) { - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); /* Ok so we timed out, lets see what we can do about it...*/ - TLan_FreeLists( dev ); - TLan_ResetLists( dev ); - TLan_ReadAndClearStats( dev, TLAN_IGNORE ); - TLan_ResetAdapter( dev ); + tlan_free_lists(dev); + tlan_reset_lists(dev); + tlan_read_and_clear_stats(dev, TLAN_IGNORE); + tlan_reset_adapter(dev); dev->trans_start = jiffies; /* prevent tx timeout */ - netif_wake_queue( dev ); + netif_wake_queue(dev); } - /*************************************************************** - * TLan_tx_timeout_work - * - * Returns: nothing - * - * Params: - * work work item of device which timed out - * - **************************************************************/ +/*************************************************************** + * tlan_tx_timeout_work + * + * Returns: nothing + * + * Params: + * work work item of device which timed out + * + **************************************************************/ -static void TLan_tx_timeout_work(struct work_struct *work) +static void tlan_tx_timeout_work(struct work_struct *work) { - TLanPrivateInfo *priv = - container_of(work, TLanPrivateInfo, tlan_tqueue); + struct tlan_priv *priv = + container_of(work, struct tlan_priv, tlan_tqueue); - TLan_tx_timeout(priv->dev); + tlan_tx_timeout(priv->dev); } - /*************************************************************** - * TLan_StartTx - * - * Returns: - * 0 on success, non-zero on failure. - * Parms: - * skb A pointer to the sk_buff containing the - * frame to be sent. - * dev The device to send the data on. - * - * This function adds a frame to the Tx list to be sent - * ASAP. First it verifies that the adapter is ready and - * there is room in the queue. Then it sets up the next - * available list, copies the frame to the corresponding - * buffer. If the adapter Tx channel is idle, it gives - * the adapter a Tx Go command on the list, otherwise it - * sets the forward address of the previous list to point - * to this one. Then it frees the sk_buff. 
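/*
 * [Editorial aside, not part of the patch]  A minimal sketch of the Tx
 * hand-off described above.  "struct example_tx_ring" and its fields are
 * hypothetical; the real code below keys the same decision off
 * priv->tx_in_progress, priv->tx_tail and the list's forward pointer.
 */
struct example_tx_ring {
	int in_progress;		/* adapter currently walking the Tx chain? */
	unsigned long io_base;		/* dev->base_addr in the real driver */
	struct tlan_list *prev;		/* last list handed to the adapter */
};

static void example_tx_submit(struct example_tx_ring *ring,
			      struct tlan_list *list, dma_addr_t list_phys)
{
	if (!ring->in_progress) {
		/* channel idle: point the adapter at this list and issue Tx GO */
		ring->in_progress = 1;
		outl(list_phys, ring->io_base + TLAN_CH_PARM);
		outl(TLAN_HC_GO, ring->io_base + TLAN_HOST_CMD);
	} else {
		/* channel busy: chain this list behind the previously queued one */
		ring->prev->forward = list_phys;
	}
	ring->prev = list;
}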
- * - **************************************************************/ - -static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) +/*************************************************************** + * tlan_start_tx + * + * Returns: + * 0 on success, non-zero on failure. + * Parms: + * skb A pointer to the sk_buff containing the + * frame to be sent. + * dev The device to send the data on. + * + * This function adds a frame to the Tx list to be sent + * ASAP. First it verifies that the adapter is ready and + * there is room in the queue. Then it sets up the next + * available list, copies the frame to the corresponding + * buffer. If the adapter Tx channel is idle, it gives + * the adapter a Tx Go command on the list, otherwise it + * sets the forward address of the previous list to point + * to this one. Then it frees the sk_buff. + * + **************************************************************/ + +static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); dma_addr_t tail_list_phys; - TLanList *tail_list; + struct tlan_list *tail_list; unsigned long flags; unsigned int txlen; - if ( ! priv->phyOnline ) { - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", - dev->name ); + if (!priv->phy_online) { + TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", + dev->name); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1100,218 +1031,214 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev ) return NETDEV_TX_OK; txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE); - tail_list = priv->txList + priv->txTail; - tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; + tail_list = priv->tx_list + priv->tx_tail; + tail_list_phys = + priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail; - if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { - TLAN_DBG( TLAN_DEBUG_TX, - "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", - dev->name, priv->txHead, priv->txTail ); + if (tail_list->c_stat != TLAN_CSTAT_UNUSED) { + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", + dev->name, priv->tx_head, priv->tx_tail); netif_stop_queue(dev); - priv->txBusyCount++; + priv->tx_busy_count++; return NETDEV_TX_BUSY; } tail_list->forward = 0; - tail_list->buffer[0].address = pci_map_single(priv->pciDev, + tail_list->buffer[0].address = pci_map_single(priv->pci_dev, skb->data, txlen, PCI_DMA_TODEVICE); - TLan_StoreSKB(tail_list, skb); + tlan_store_skb(tail_list, skb); - tail_list->frameSize = (u16) txlen; + tail_list->frame_size = (u16) txlen; tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; tail_list->buffer[1].count = 0; tail_list->buffer[1].address = 0; spin_lock_irqsave(&priv->lock, flags); - tail_list->cStat = TLAN_CSTAT_READY; - if ( ! 
priv->txInProgress ) { - priv->txInProgress = 1; - TLAN_DBG( TLAN_DEBUG_TX, - "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); - outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); - outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); + tail_list->c_stat = TLAN_CSTAT_READY; + if (!priv->tx_in_progress) { + priv->tx_in_progress = 1; + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: Starting TX on buffer %d\n", + priv->tx_tail); + outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM); + outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD); } else { - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", - priv->txTail ); - if ( priv->txTail == 0 ) { - ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: Adding buffer %d to TX channel\n", + priv->tx_tail); + if (priv->tx_tail == 0) { + (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward = tail_list_phys; } else { - ( priv->txList + ( priv->txTail - 1 ) )->forward + (priv->tx_list + (priv->tx_tail - 1))->forward = tail_list_phys; } } spin_unlock_irqrestore(&priv->lock, flags); - CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); + CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS); return NETDEV_TX_OK; -} /* TLan_StartTx */ +} - /*************************************************************** - * TLan_HandleInterrupt - * - * Returns: - * Nothing - * Parms: - * irq The line on which the interrupt - * occurred. - * dev_id A pointer to the device assigned to - * this irq line. - * - * This function handles an interrupt generated by its - * assigned TLAN adapter. The function deactivates - * interrupts on its adapter, records the type of - * interrupt, executes the appropriate subhandler, and - * acknowdges the interrupt to the adapter (thus - * re-enabling adapter interrupts. - * - **************************************************************/ +/*************************************************************** + * tlan_handle_interrupt + * + * Returns: + * Nothing + * Parms: + * irq The line on which the interrupt + * occurred. + * dev_id A pointer to the device assigned to + * this irq line. + * + * This function handles an interrupt generated by its + * assigned TLAN adapter. The function deactivates + * interrupts on its adapter, records the type of + * interrupt, executes the appropriate subhandler, and + * acknowdges the interrupt to the adapter (thus + * re-enabling adapter interrupts. 
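/*
 * [Editorial aside, not part of the patch]  The dispatch described above
 * indexes a table of handlers with the interrupt type pulled out of
 * HOST_INT.  A hedged sketch of such a table; the handler names follow the
 * functions renamed by this patch, but the real tlan_int_vector[] is
 * defined elsewhere in tlan.c and its exact contents should be checked
 * there.
 */
typedef u32 (*example_int_handler)(struct net_device *dev, u16 host_int);

static example_int_handler example_int_vector[] = {
	NULL,				/* 0: invalid type */
	tlan_handle_tx_eof,		/* Tx EOF */
	tlan_handle_stat_overflow,	/* statistics overflow */
	tlan_handle_rx_eof,		/* Rx EOF */
	tlan_handle_dummy,		/* dummy (test) interrupt */
	tlan_handle_tx_eoc,		/* Tx EOC */
	tlan_handle_status_check,	/* adapter check / network status */
	tlan_handle_rx_eoc,		/* Rx EOC */
};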
+ * + **************************************************************/ -static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id) +static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 host_int; u16 type; spin_lock(&priv->lock); - host_int = inw( dev->base_addr + TLAN_HOST_INT ); - type = ( host_int & TLAN_HI_IT_MASK ) >> 2; - if ( type ) { + host_int = inw(dev->base_addr + TLAN_HOST_INT); + type = (host_int & TLAN_HI_IT_MASK) >> 2; + if (type) { u32 ack; u32 host_cmd; - outw( host_int, dev->base_addr + TLAN_HOST_INT ); - ack = TLanIntVector[type]( dev, host_int ); + outw(host_int, dev->base_addr + TLAN_HOST_INT); + ack = tlan_int_vector[type](dev, host_int); - if ( ack ) { - host_cmd = TLAN_HC_ACK | ack | ( type << 18 ); - outl( host_cmd, dev->base_addr + TLAN_HOST_CMD ); + if (ack) { + host_cmd = TLAN_HC_ACK | ack | (type << 18); + outl(host_cmd, dev->base_addr + TLAN_HOST_CMD); } } spin_unlock(&priv->lock); return IRQ_RETVAL(type); -} /* TLan_HandleInterrupts */ +} - /*************************************************************** - * TLan_Close - * - * Returns: - * An error code. - * Parms: - * dev The device structure of the device to - * close. - * - * This function shuts down the adapter. It records any - * stats, puts the adapter into reset state, deactivates - * its time as needed, and frees the irq it is using. - * - **************************************************************/ +/*************************************************************** + * tlan_close + * + * Returns: + * An error code. + * Parms: + * dev The device structure of the device to + * close. + * + * This function shuts down the adapter. It records any + * stats, puts the adapter into reset state, deactivates + * its time as needed, and frees the irq it is using. + * + **************************************************************/ -static int TLan_Close(struct net_device *dev) +static int tlan_close(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); - netif_stop_queue(dev); priv->neg_be_verbose = 0; + tlan_stop(dev); - TLan_ReadAndClearStats( dev, TLAN_RECORD ); - outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); - if ( priv->timer.function != NULL ) { - del_timer_sync( &priv->timer ); - priv->timer.function = NULL; - } - - free_irq( dev->irq, dev ); - TLan_FreeLists( dev ); - TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name ); + free_irq(dev->irq, dev); + tlan_free_lists(dev); + TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name); return 0; -} /* TLan_Close */ +} - /*************************************************************** - * TLan_GetStats - * - * Returns: - * A pointer to the device's statistics structure. - * Parms: - * dev The device structure to return the - * stats for. - * - * This function updates the devices statistics by reading - * the TLAN chip's onboard registers. Then it returns the - * address of the statistics structure. - * - **************************************************************/ +/*************************************************************** + * tlan_get_stats + * + * Returns: + * A pointer to the device's statistics structure. + * Parms: + * dev The device structure to return the + * stats for. + * + * This function updates the devices statistics by reading + * the TLAN chip's onboard registers. 
Then it returns the + * address of the statistics structure. + * + **************************************************************/ -static struct net_device_stats *TLan_GetStats( struct net_device *dev ) +static struct net_device_stats *tlan_get_stats(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); int i; /* Should only read stats if open ? */ - TLan_ReadAndClearStats( dev, TLAN_RECORD ); + tlan_read_and_clear_stats(dev, TLAN_RECORD); - TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, - priv->rxEocCount ); - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, - priv->txBusyCount ); - if ( debug & TLAN_DEBUG_GNRL ) { - TLan_PrintDio( dev->base_addr ); - TLan_PhyPrint( dev ); + TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, + priv->rx_eoc_count); + TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, + priv->tx_busy_count); + if (debug & TLAN_DEBUG_GNRL) { + tlan_print_dio(dev->base_addr); + tlan_phy_print(dev); } - if ( debug & TLAN_DEBUG_LIST ) { - for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) - TLan_PrintList( priv->rxList + i, "RX", i ); - for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) - TLan_PrintList( priv->txList + i, "TX", i ); + if (debug & TLAN_DEBUG_LIST) { + for (i = 0; i < TLAN_NUM_RX_LISTS; i++) + tlan_print_list(priv->rx_list + i, "RX", i); + for (i = 0; i < TLAN_NUM_TX_LISTS; i++) + tlan_print_list(priv->tx_list + i, "TX", i); } return &dev->stats; -} /* TLan_GetStats */ +} - /*************************************************************** - * TLan_SetMulticastList - * - * Returns: - * Nothing - * Parms: - * dev The device structure to set the - * multicast list for. - * - * This function sets the TLAN adaptor to various receive - * modes. If the IFF_PROMISC flag is set, promiscuous - * mode is acitviated. Otherwise, promiscuous mode is - * turned off. If the IFF_ALLMULTI flag is set, then - * the hash table is set to receive all group addresses. - * Otherwise, the first three multicast addresses are - * stored in AREG_1-3, and the rest are selected via the - * hash table, as necessary. - * - **************************************************************/ +/*************************************************************** + * tlan_set_multicast_list + * + * Returns: + * Nothing + * Parms: + * dev The device structure to set the + * multicast list for. + * + * This function sets the TLAN adaptor to various receive + * modes. If the IFF_PROMISC flag is set, promiscuous + * mode is acitviated. Otherwise, promiscuous mode is + * turned off. If the IFF_ALLMULTI flag is set, then + * the hash table is set to receive all group addresses. + * Otherwise, the first three multicast addresses are + * stored in AREG_1-3, and the rest are selected via the + * hash table, as necessary. 
+ * + **************************************************************/ -static void TLan_SetMulticastList( struct net_device *dev ) +static void tlan_set_multicast_list(struct net_device *dev) { struct netdev_hw_addr *ha; u32 hash1 = 0; @@ -1320,53 +1247,56 @@ static void TLan_SetMulticastList( struct net_device *dev ) u32 offset; u8 tmp; - if ( dev->flags & IFF_PROMISC ) { - tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); - TLan_DioWrite8( dev->base_addr, - TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); + if (dev->flags & IFF_PROMISC) { + tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD); + tlan_dio_write8(dev->base_addr, + TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF); } else { - tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); - TLan_DioWrite8( dev->base_addr, - TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); - if ( dev->flags & IFF_ALLMULTI ) { - for ( i = 0; i < 3; i++ ) - TLan_SetMac( dev, i + 1, NULL ); - TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF ); - TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); + tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD); + tlan_dio_write8(dev->base_addr, + TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF); + if (dev->flags & IFF_ALLMULTI) { + for (i = 0; i < 3; i++) + tlan_set_mac(dev, i + 1, NULL); + tlan_dio_write32(dev->base_addr, TLAN_HASH_1, + 0xffffffff); + tlan_dio_write32(dev->base_addr, TLAN_HASH_2, + 0xffffffff); } else { i = 0; netdev_for_each_mc_addr(ha, dev) { - if ( i < 3 ) { - TLan_SetMac( dev, i + 1, + if (i < 3) { + tlan_set_mac(dev, i + 1, (char *) &ha->addr); } else { - offset = TLan_HashFunc((u8 *)&ha->addr); - if ( offset < 32 ) - hash1 |= ( 1 << offset ); + offset = + tlan_hash_func((u8 *)&ha->addr); + if (offset < 32) + hash1 |= (1 << offset); else - hash2 |= ( 1 << ( offset - 32 ) ); + hash2 |= (1 << (offset - 32)); } i++; } - for ( ; i < 3; i++ ) - TLan_SetMac( dev, i + 1, NULL ); - TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 ); - TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 ); + for ( ; i < 3; i++) + tlan_set_mac(dev, i + 1, NULL); + tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1); + tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2); } } -} /* TLan_SetMulticastList */ +} /***************************************************************************** ****************************************************************************** - ThunderLAN Driver Interrupt Vectors and Table +ThunderLAN driver interrupt vectors and table - Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN - Programmer's Guide" for more informations on handling interrupts - generated by TLAN based adapters. +please see chap. 4, "Interrupt Handling" of the "ThunderLAN +Programmer's Guide" for more informations on handling interrupts +generated by TLAN based adapters. ****************************************************************************** *****************************************************************************/ @@ -1374,46 +1304,48 @@ static void TLan_SetMulticastList( struct net_device *dev ) - /*************************************************************** - * TLan_HandleTxEOF - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles Tx EOF interrupts which are raised - * by the adapter when it has completed sending the - * contents of a buffer. If detemines which list/buffer - * was completed and resets it. 
If the buffer was the last - * in the channel (EOC), then the function checks to see if - * another buffer is ready to send, and if so, sends a Tx - * Go command. Finally, the driver activates/continues the - * activity LED. - * - **************************************************************/ - -static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) +/*************************************************************** + * tlan_handle_tx_eof + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles Tx EOF interrupts which are raised + * by the adapter when it has completed sending the + * contents of a buffer. If detemines which list/buffer + * was completed and resets it. If the buffer was the last + * in the channel (EOC), then the function checks to see if + * another buffer is ready to send, and if so, sends a Tx + * Go command. Finally, the driver activates/continues the + * activity LED. + * + **************************************************************/ + +static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); int eoc = 0; - TLanList *head_list; + struct tlan_list *head_list; dma_addr_t head_list_phys; u32 ack = 0; - u16 tmpCStat; + u16 tmp_c_stat; - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", - priv->txHead, priv->txTail ); - head_list = priv->txList + priv->txHead; + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", + priv->tx_head, priv->tx_tail); + head_list = priv->tx_list + priv->tx_head; - while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { - struct sk_buff *skb = TLan_GetSKB(head_list); + while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP) + && (ack < 255)) { + struct sk_buff *skb = tlan_get_skb(head_list); ack++; - pci_unmap_single(priv->pciDev, head_list->buffer[0].address, + pci_unmap_single(priv->pci_dev, head_list->buffer[0].address, max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE), PCI_DMA_TODEVICE); @@ -1421,304 +1353,313 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int ) head_list->buffer[8].address = 0; head_list->buffer[9].address = 0; - if ( tmpCStat & TLAN_CSTAT_EOC ) + if (tmp_c_stat & TLAN_CSTAT_EOC) eoc = 1; - dev->stats.tx_bytes += head_list->frameSize; + dev->stats.tx_bytes += head_list->frame_size; - head_list->cStat = TLAN_CSTAT_UNUSED; + head_list->c_stat = TLAN_CSTAT_UNUSED; netif_start_queue(dev); - CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS ); - head_list = priv->txList + priv->txHead; + CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS); + head_list = priv->tx_list + priv->tx_head; } if (!ack) - printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); - - if ( eoc ) { - TLAN_DBG( TLAN_DEBUG_TX, - "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", - priv->txHead, priv->txTail ); - head_list = priv->txList + priv->txHead; - head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; - if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { - outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); + netdev_info(dev, + "Received interrupt for uncompleted TX frame\n"); + + if (eoc) { + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n", + priv->tx_head, priv->tx_tail); + head_list = priv->tx_list + priv->tx_head; + head_list_phys = priv->tx_list_dma + + sizeof(struct 
tlan_list)*priv->tx_head; + if ((head_list->c_stat & TLAN_CSTAT_READY) + == TLAN_CSTAT_READY) { + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); ack |= TLAN_HC_GO; } else { - priv->txInProgress = 0; + priv->tx_in_progress = 0; } } - if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { - TLan_DioWrite8( dev->base_addr, - TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); - if ( priv->timer.function == NULL ) { - priv->timer.function = TLan_Timer; - priv->timer.data = (unsigned long) dev; - priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; - priv->timerSetAt = jiffies; - priv->timerType = TLAN_TIMER_ACTIVITY; - add_timer(&priv->timer); - } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { - priv->timerSetAt = jiffies; + if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) { + tlan_dio_write8(dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); + if (priv->timer.function == NULL) { + priv->timer.function = tlan_timer; + priv->timer.data = (unsigned long) dev; + priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; + priv->timer_set_at = jiffies; + priv->timer_type = TLAN_TIMER_ACTIVITY; + add_timer(&priv->timer); + } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) { + priv->timer_set_at = jiffies; } } return ack; -} /* TLan_HandleTxEOF */ +} - /*************************************************************** - * TLan_HandleStatOverflow - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles the Statistics Overflow interrupt - * which means that one or more of the TLAN statistics - * registers has reached 1/2 capacity and needs to be read. - * - **************************************************************/ +/*************************************************************** + * TLan_HandleStatOverflow + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Statistics Overflow interrupt + * which means that one or more of the TLAN statistics + * registers has reached 1/2 capacity and needs to be read. + * + **************************************************************/ -static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int ) +static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int) { - TLan_ReadAndClearStats( dev, TLAN_RECORD ); + tlan_read_and_clear_stats(dev, TLAN_RECORD); return 1; -} /* TLan_HandleStatOverflow */ - - - - - /*************************************************************** - * TLan_HandleRxEOF - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles the Rx EOF interrupt which - * indicates a frame has been received by the adapter from - * the net and the frame has been transferred to memory. - * The function determines the bounce buffer the frame has - * been loaded into, creates a new sk_buff big enough to - * hold the frame, and sends it to protocol stack. It - * then resets the used buffer and appends it to the end - * of the list. If the frame was the last in the Rx - * channel (EOC), the function restarts the receive channel - * by sending an Rx Go command to the adapter. Then it - * activates/continues the activity LED. 
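/*
 * [Editorial aside, not part of the patch]  The Rx EOF path described above
 * uses the usual "replace before indicate" pattern: a fresh buffer is
 * allocated first, and only if that succeeds is the completed buffer passed
 * up the stack; otherwise the old buffer is reused and the frame dropped.
 * A hedged sketch with hypothetical names ("struct example_rx_slot" is
 * illustrative; the real driver works on struct tlan_list entries and also
 * remaps the DMA buffer).
 */
struct example_rx_slot {
	struct sk_buff *skb;		/* buffer currently owned by this descriptor */
};

static void example_rx_indicate(struct net_device *dev,
				struct example_rx_slot *slot,
				unsigned int frame_size)
{
	struct sk_buff *done = slot->skb;
	struct sk_buff *fresh;

	fresh = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
	if (!fresh)
		return;			/* keep the old buffer; the slot stays usable */

	skb_put(done, frame_size);
	done->protocol = eth_type_trans(done, dev);
	netif_rx(done);

	slot->skb = fresh;		/* descriptor now owns the fresh buffer */
}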
- * - **************************************************************/ - -static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int ) +} + + + + +/*************************************************************** + * TLan_HandleRxEOF + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Rx EOF interrupt which + * indicates a frame has been received by the adapter from + * the net and the frame has been transferred to memory. + * The function determines the bounce buffer the frame has + * been loaded into, creates a new sk_buff big enough to + * hold the frame, and sends it to protocol stack. It + * then resets the used buffer and appends it to the end + * of the list. If the frame was the last in the Rx + * channel (EOC), the function restarts the receive channel + * by sending an Rx Go command to the adapter. Then it + * activates/continues the activity LED. + * + **************************************************************/ + +static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u32 ack = 0; int eoc = 0; - TLanList *head_list; + struct tlan_list *head_list; struct sk_buff *skb; - TLanList *tail_list; - u16 tmpCStat; + struct tlan_list *tail_list; + u16 tmp_c_stat; dma_addr_t head_list_phys; - TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", - priv->rxHead, priv->rxTail ); - head_list = priv->rxList + priv->rxHead; - head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; + TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n", + priv->rx_head, priv->rx_tail); + head_list = priv->rx_list + priv->rx_head; + head_list_phys = + priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head; - while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) { - dma_addr_t frameDma = head_list->buffer[0].address; - u32 frameSize = head_list->frameSize; + while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP) + && (ack < 255)) { + dma_addr_t frame_dma = head_list->buffer[0].address; + u32 frame_size = head_list->frame_size; struct sk_buff *new_skb; ack++; - if (tmpCStat & TLAN_CSTAT_EOC) + if (tmp_c_stat & TLAN_CSTAT_EOC) eoc = 1; new_skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if ( !new_skb ) + if (!new_skb) goto drop_and_reuse; - skb = TLan_GetSKB(head_list); - pci_unmap_single(priv->pciDev, frameDma, + skb = tlan_get_skb(head_list); + pci_unmap_single(priv->pci_dev, frame_dma, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); - skb_put( skb, frameSize ); + skb_put(skb, frame_size); - dev->stats.rx_bytes += frameSize; + dev->stats.rx_bytes += frame_size; - skb->protocol = eth_type_trans( skb, dev ); - netif_rx( skb ); + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); - head_list->buffer[0].address = pci_map_single(priv->pciDev, - new_skb->data, - TLAN_MAX_FRAME_SIZE, - PCI_DMA_FROMDEVICE); + head_list->buffer[0].address = + pci_map_single(priv->pci_dev, new_skb->data, + TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); - TLan_StoreSKB(head_list, new_skb); + tlan_store_skb(head_list, new_skb); drop_and_reuse: head_list->forward = 0; - head_list->cStat = 0; - tail_list = priv->rxList + priv->rxTail; + head_list->c_stat = 0; + tail_list = priv->rx_list + priv->rx_tail; tail_list->forward = head_list_phys; - CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS ); - CIRC_INC( priv->rxTail, 
TLAN_NUM_RX_LISTS ); - head_list = priv->rxList + priv->rxHead; - head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; + CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS); + CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS); + head_list = priv->rx_list + priv->rx_head; + head_list_phys = priv->rx_list_dma + + sizeof(struct tlan_list)*priv->rx_head; } if (!ack) - printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); - - - if ( eoc ) { - TLAN_DBG( TLAN_DEBUG_RX, - "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", - priv->rxHead, priv->rxTail ); - head_list = priv->rxList + priv->rxHead; - head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; - outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); + netdev_info(dev, + "Received interrupt for uncompleted RX frame\n"); + + + if (eoc) { + TLAN_DBG(TLAN_DEBUG_RX, + "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n", + priv->rx_head, priv->rx_tail); + head_list = priv->rx_list + priv->rx_head; + head_list_phys = priv->rx_list_dma + + sizeof(struct tlan_list)*priv->rx_head; + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); ack |= TLAN_HC_GO | TLAN_HC_RT; - priv->rxEocCount++; + priv->rx_eoc_count++; } - if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { - TLan_DioWrite8( dev->base_addr, - TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); - if ( priv->timer.function == NULL ) { - priv->timer.function = TLan_Timer; + if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) { + tlan_dio_write8(dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); + if (priv->timer.function == NULL) { + priv->timer.function = tlan_timer; priv->timer.data = (unsigned long) dev; priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; - priv->timerSetAt = jiffies; - priv->timerType = TLAN_TIMER_ACTIVITY; + priv->timer_set_at = jiffies; + priv->timer_type = TLAN_TIMER_ACTIVITY; add_timer(&priv->timer); - } else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) { - priv->timerSetAt = jiffies; + } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) { + priv->timer_set_at = jiffies; } } return ack; -} /* TLan_HandleRxEOF */ +} - /*************************************************************** - * TLan_HandleDummy - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles the Dummy interrupt, which is - * raised whenever a test interrupt is generated by setting - * the Req_Int bit of HOST_CMD to 1. - * - **************************************************************/ +/*************************************************************** + * tlan_handle_dummy + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles the Dummy interrupt, which is + * raised whenever a test interrupt is generated by setting + * the Req_Int bit of HOST_CMD to 1. + * + **************************************************************/ -static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int ) +static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int) { - printk( "TLAN: Test interrupt on %s.\n", dev->name ); + netdev_info(dev, "Test interrupt\n"); return 1; -} /* TLan_HandleDummy */ +} - /*************************************************************** - * TLan_HandleTxEOC - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. 
- * - * This driver is structured to determine EOC occurrences by - * reading the CSTAT member of the list structure. Tx EOC - * interrupts are disabled via the DIO INTDIS register. - * However, TLAN chips before revision 3.0 didn't have this - * functionality, so process EOC events if this is the - * case. - * - **************************************************************/ +/*************************************************************** + * tlan_handle_tx_eoc + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This driver is structured to determine EOC occurrences by + * reading the CSTAT member of the list structure. Tx EOC + * interrupts are disabled via the DIO INTDIS register. + * However, TLAN chips before revision 3.0 didn't have this + * functionality, so process EOC events if this is the + * case. + * + **************************************************************/ -static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int ) +static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int) { - TLanPrivateInfo *priv = netdev_priv(dev); - TLanList *head_list; + struct tlan_priv *priv = netdev_priv(dev); + struct tlan_list *head_list; dma_addr_t head_list_phys; u32 ack = 1; host_int = 0; - if ( priv->tlanRev < 0x30 ) { - TLAN_DBG( TLAN_DEBUG_TX, - "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", - priv->txHead, priv->txTail ); - head_list = priv->txList + priv->txHead; - head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; - if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { + if (priv->tlan_rev < 0x30) { + TLAN_DBG(TLAN_DEBUG_TX, + "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n", + priv->tx_head, priv->tx_tail); + head_list = priv->tx_list + priv->tx_head; + head_list_phys = priv->tx_list_dma + + sizeof(struct tlan_list)*priv->tx_head; + if ((head_list->c_stat & TLAN_CSTAT_READY) + == TLAN_CSTAT_READY) { netif_stop_queue(dev); - outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); ack |= TLAN_HC_GO; } else { - priv->txInProgress = 0; + priv->tx_in_progress = 0; } } return ack; -} /* TLan_HandleTxEOC */ +} - /*************************************************************** - * TLan_HandleStatusCheck - * - * Returns: - * 0 if Adapter check, 1 if Network Status check. - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This function handles Adapter Check/Network Status - * interrupts generated by the adapter. It checks the - * vector in the HOST_INT register to determine if it is - * an Adapter Check interrupt. If so, it resets the - * adapter. Otherwise it clears the status registers - * and services the PHY. - * - **************************************************************/ +/*************************************************************** + * tlan_handle_status_check + * + * Returns: + * 0 if Adapter check, 1 if Network Status check. + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This function handles Adapter Check/Network Status + * interrupts generated by the adapter. It checks the + * vector in the HOST_INT register to determine if it is + * an Adapter Check interrupt. If so, it resets the + * adapter. Otherwise it clears the status registers + * and services the PHY. 
+ * + **************************************************************/ -static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) +static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u32 ack; u32 error; u8 net_sts; @@ -1727,92 +1668,94 @@ static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int ) u16 tlphy_sts; ack = 1; - if ( host_int & TLAN_HI_IV_MASK ) { - netif_stop_queue( dev ); - error = inl( dev->base_addr + TLAN_CH_PARM ); - printk( "TLAN: %s: Adaptor Error = 0x%x\n", dev->name, error ); - TLan_ReadAndClearStats( dev, TLAN_RECORD ); - outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD ); + if (host_int & TLAN_HI_IV_MASK) { + netif_stop_queue(dev); + error = inl(dev->base_addr + TLAN_CH_PARM); + netdev_info(dev, "Adaptor Error = 0x%x\n", error); + tlan_read_and_clear_stats(dev, TLAN_RECORD); + outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); schedule_work(&priv->tlan_tqueue); netif_wake_queue(dev); ack = 0; } else { - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name ); - phy = priv->phy[priv->phyNum]; - - net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); - if ( net_sts ) { - TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", - dev->name, (unsigned) net_sts ); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name); + phy = priv->phy[priv->phy_num]; + + net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS); + if (net_sts) { + tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", + dev->name, (unsigned) net_sts); } - if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { - TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); - TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); - if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && - ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { - tlphy_ctl |= TLAN_TC_SWAPOL; - TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); - } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && - ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { - tlphy_ctl &= ~TLAN_TC_SWAPOL; - TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); - } - - if (debug) { - TLan_PhyPrint( dev ); + if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) { + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts); + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); + if (!(tlphy_sts & TLAN_TS_POLOK) && + !(tlphy_ctl & TLAN_TC_SWAPOL)) { + tlphy_ctl |= TLAN_TC_SWAPOL; + tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, + tlphy_ctl); + } else if ((tlphy_sts & TLAN_TS_POLOK) && + (tlphy_ctl & TLAN_TC_SWAPOL)) { + tlphy_ctl &= ~TLAN_TC_SWAPOL; + tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, + tlphy_ctl); } + + if (debug) + tlan_phy_print(dev); } } return ack; -} /* TLan_HandleStatusCheck */ +} - /*************************************************************** - * TLan_HandleRxEOC - * - * Returns: - * 1 - * Parms: - * dev Device assigned the IRQ that was - * raised. - * host_int The contents of the HOST_INT - * port. - * - * This driver is structured to determine EOC occurrences by - * reading the CSTAT member of the list structure. Rx EOC - * interrupts are disabled via the DIO INTDIS register. - * However, TLAN chips before revision 3.0 didn't have this - * CSTAT member or a INTDIS register, so if this chip is - * pre-3.0, process EOC interrupts normally. 
- * - **************************************************************/ +/*************************************************************** + * tlan_handle_rx_eoc + * + * Returns: + * 1 + * Parms: + * dev Device assigned the IRQ that was + * raised. + * host_int The contents of the HOST_INT + * port. + * + * This driver is structured to determine EOC occurrences by + * reading the CSTAT member of the list structure. Rx EOC + * interrupts are disabled via the DIO INTDIS register. + * However, TLAN chips before revision 3.0 didn't have this + * CSTAT member or a INTDIS register, so if this chip is + * pre-3.0, process EOC interrupts normally. + * + **************************************************************/ -static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) +static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); dma_addr_t head_list_phys; u32 ack = 1; - if ( priv->tlanRev < 0x30 ) { - TLAN_DBG( TLAN_DEBUG_RX, - "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", - priv->rxHead, priv->rxTail ); - head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; - outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); + if (priv->tlan_rev < 0x30) { + TLAN_DBG(TLAN_DEBUG_RX, + "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n", + priv->rx_head, priv->rx_tail); + head_list_phys = priv->rx_list_dma + + sizeof(struct tlan_list)*priv->rx_head; + outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); ack |= TLAN_HC_GO | TLAN_HC_RT; - priv->rxEocCount++; + priv->rx_eoc_count++; } return ack; -} /* TLan_HandleRxEOC */ +} @@ -1820,98 +1763,98 @@ static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int ) /***************************************************************************** ****************************************************************************** - ThunderLAN Driver Timer Function +ThunderLAN driver timer function ****************************************************************************** *****************************************************************************/ - /*************************************************************** - * TLan_Timer - * - * Returns: - * Nothing - * Parms: - * data A value given to add timer when - * add_timer was called. - * - * This function handles timed functionality for the - * TLAN driver. The two current timer uses are for - * delaying for autonegotionation and driving the ACT LED. - * - Autonegotiation requires being allowed about - * 2 1/2 seconds before attempting to transmit a - * packet. It would be a very bad thing to hang - * the kernel this long, so the driver doesn't - * allow transmission 'til after this time, for - * certain PHYs. It would be much nicer if all - * PHYs were interrupt-capable like the internal - * PHY. - * - The ACT LED, which shows adapter activity, is - * driven by the driver, and so must be left on - * for a short period to power up the LED so it - * can be seen. This delay can be changed by - * changing the TLAN_TIMER_ACT_DELAY in tlan.h, - * if desired. 100 ms produces a slightly - * sluggish response. - * - **************************************************************/ - -static void TLan_Timer( unsigned long data ) +/*************************************************************** + * tlan_timer + * + * Returns: + * Nothing + * Parms: + * data A value given to add timer when + * add_timer was called. + * + * This function handles timed functionality for the + * TLAN driver. 
The two current timer uses are for + * delaying for autonegotionation and driving the ACT LED. + * - Autonegotiation requires being allowed about + * 2 1/2 seconds before attempting to transmit a + * packet. It would be a very bad thing to hang + * the kernel this long, so the driver doesn't + * allow transmission 'til after this time, for + * certain PHYs. It would be much nicer if all + * PHYs were interrupt-capable like the internal + * PHY. + * - The ACT LED, which shows adapter activity, is + * driven by the driver, and so must be left on + * for a short period to power up the LED so it + * can be seen. This delay can be changed by + * changing the TLAN_TIMER_ACT_DELAY in tlan.h, + * if desired. 100 ms produces a slightly + * sluggish response. + * + **************************************************************/ + +static void tlan_timer(unsigned long data) { struct net_device *dev = (struct net_device *) data; - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u32 elapsed; unsigned long flags = 0; priv->timer.function = NULL; - switch ( priv->timerType ) { + switch (priv->timer_type) { #ifdef MONITOR - case TLAN_TIMER_LINK_BEAT: - TLan_PhyMonitor( dev ); - break; + case TLAN_TIMER_LINK_BEAT: + tlan_phy_monitor(dev); + break; #endif - case TLAN_TIMER_PHY_PDOWN: - TLan_PhyPowerDown( dev ); - break; - case TLAN_TIMER_PHY_PUP: - TLan_PhyPowerUp( dev ); - break; - case TLAN_TIMER_PHY_RESET: - TLan_PhyReset( dev ); - break; - case TLAN_TIMER_PHY_START_LINK: - TLan_PhyStartLink( dev ); - break; - case TLAN_TIMER_PHY_FINISH_AN: - TLan_PhyFinishAutoNeg( dev ); - break; - case TLAN_TIMER_FINISH_RESET: - TLan_FinishReset( dev ); - break; - case TLAN_TIMER_ACTIVITY: - spin_lock_irqsave(&priv->lock, flags); - if ( priv->timer.function == NULL ) { - elapsed = jiffies - priv->timerSetAt; - if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { - TLan_DioWrite8( dev->base_addr, - TLAN_LED_REG, TLAN_LED_LINK ); - } else { - priv->timer.function = TLan_Timer; - priv->timer.expires = priv->timerSetAt - + TLAN_TIMER_ACT_DELAY; - spin_unlock_irqrestore(&priv->lock, flags); - add_timer( &priv->timer ); - break; - } + case TLAN_TIMER_PHY_PDOWN: + tlan_phy_power_down(dev); + break; + case TLAN_TIMER_PHY_PUP: + tlan_phy_power_up(dev); + break; + case TLAN_TIMER_PHY_RESET: + tlan_phy_reset(dev); + break; + case TLAN_TIMER_PHY_START_LINK: + tlan_phy_start_link(dev); + break; + case TLAN_TIMER_PHY_FINISH_AN: + tlan_phy_finish_auto_neg(dev); + break; + case TLAN_TIMER_FINISH_RESET: + tlan_finish_reset(dev); + break; + case TLAN_TIMER_ACTIVITY: + spin_lock_irqsave(&priv->lock, flags); + if (priv->timer.function == NULL) { + elapsed = jiffies - priv->timer_set_at; + if (elapsed >= TLAN_TIMER_ACT_DELAY) { + tlan_dio_write8(dev->base_addr, + TLAN_LED_REG, TLAN_LED_LINK); + } else { + priv->timer.function = tlan_timer; + priv->timer.expires = priv->timer_set_at + + TLAN_TIMER_ACT_DELAY; + spin_unlock_irqrestore(&priv->lock, flags); + add_timer(&priv->timer); + break; } - spin_unlock_irqrestore(&priv->lock, flags); - break; - default: - break; + } + spin_unlock_irqrestore(&priv->lock, flags); + break; + default: + break; } -} /* TLan_Timer */ +} @@ -1919,39 +1862,39 @@ static void TLan_Timer( unsigned long data ) /***************************************************************************** ****************************************************************************** - ThunderLAN Driver Adapter Related Routines +ThunderLAN driver adapter related routines 
****************************************************************************** *****************************************************************************/ - /*************************************************************** - * TLan_ResetLists - * - * Returns: - * Nothing - * Parms: - * dev The device structure with the list - * stuctures to be reset. - * - * This routine sets the variables associated with managing - * the TLAN lists to their initial values. - * - **************************************************************/ - -static void TLan_ResetLists( struct net_device *dev ) +/*************************************************************** + * tlan_reset_lists + * + * Returns: + * Nothing + * Parms: + * dev The device structure with the list + * stuctures to be reset. + * + * This routine sets the variables associated with managing + * the TLAN lists to their initial values. + * + **************************************************************/ + +static void tlan_reset_lists(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); int i; - TLanList *list; + struct tlan_list *list; dma_addr_t list_phys; struct sk_buff *skb; - priv->txHead = 0; - priv->txTail = 0; - for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { - list = priv->txList + i; - list->cStat = TLAN_CSTAT_UNUSED; + priv->tx_head = 0; + priv->tx_tail = 0; + for (i = 0; i < TLAN_NUM_TX_LISTS; i++) { + list = priv->tx_list + i; + list->c_stat = TLAN_CSTAT_UNUSED; list->buffer[0].address = 0; list->buffer[2].count = 0; list->buffer[2].address = 0; @@ -1959,169 +1902,169 @@ static void TLan_ResetLists( struct net_device *dev ) list->buffer[9].address = 0; } - priv->rxHead = 0; - priv->rxTail = TLAN_NUM_RX_LISTS - 1; - for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { - list = priv->rxList + i; - list_phys = priv->rxListDMA + sizeof(TLanList) * i; - list->cStat = TLAN_CSTAT_READY; - list->frameSize = TLAN_MAX_FRAME_SIZE; + priv->rx_head = 0; + priv->rx_tail = TLAN_NUM_RX_LISTS - 1; + for (i = 0; i < TLAN_NUM_RX_LISTS; i++) { + list = priv->rx_list + i; + list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i; + list->c_stat = TLAN_CSTAT_READY; + list->frame_size = TLAN_MAX_FRAME_SIZE; list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); - if ( !skb ) { - pr_err("TLAN: out of memory for received data.\n" ); + if (!skb) { + netdev_err(dev, "Out of memory for received data\n"); break; } - list->buffer[0].address = pci_map_single(priv->pciDev, + list->buffer[0].address = pci_map_single(priv->pci_dev, skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); - TLan_StoreSKB(list, skb); + tlan_store_skb(list, skb); list->buffer[1].count = 0; list->buffer[1].address = 0; - list->forward = list_phys + sizeof(TLanList); + list->forward = list_phys + sizeof(struct tlan_list); } /* in case ran out of memory early, clear bits */ while (i < TLAN_NUM_RX_LISTS) { - TLan_StoreSKB(priv->rxList + i, NULL); + tlan_store_skb(priv->rx_list + i, NULL); ++i; } list->forward = 0; -} /* TLan_ResetLists */ +} -static void TLan_FreeLists( struct net_device *dev ) +static void tlan_free_lists(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); int i; - TLanList *list; + struct tlan_list *list; struct sk_buff *skb; - for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) { - list = priv->txList + i; - skb = TLan_GetSKB(list); - if ( skb ) { + for (i = 0; i < TLAN_NUM_TX_LISTS; i++) { + 
list = priv->tx_list + i; + skb = tlan_get_skb(list); + if (skb) { pci_unmap_single( - priv->pciDev, + priv->pci_dev, list->buffer[0].address, max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE), PCI_DMA_TODEVICE); - dev_kfree_skb_any( skb ); + dev_kfree_skb_any(skb); list->buffer[8].address = 0; list->buffer[9].address = 0; } } - for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) { - list = priv->rxList + i; - skb = TLan_GetSKB(list); - if ( skb ) { - pci_unmap_single(priv->pciDev, + for (i = 0; i < TLAN_NUM_RX_LISTS; i++) { + list = priv->rx_list + i; + skb = tlan_get_skb(list); + if (skb) { + pci_unmap_single(priv->pci_dev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); - dev_kfree_skb_any( skb ); + dev_kfree_skb_any(skb); list->buffer[8].address = 0; list->buffer[9].address = 0; } } -} /* TLan_FreeLists */ +} - /*************************************************************** - * TLan_PrintDio - * - * Returns: - * Nothing - * Parms: - * io_base Base IO port of the device of - * which to print DIO registers. - * - * This function prints out all the internal (DIO) - * registers of a TLAN chip. - * - **************************************************************/ +/*************************************************************** + * tlan_print_dio + * + * Returns: + * Nothing + * Parms: + * io_base Base IO port of the device of + * which to print DIO registers. + * + * This function prints out all the internal (DIO) + * registers of a TLAN chip. + * + **************************************************************/ -static void TLan_PrintDio( u16 io_base ) +static void tlan_print_dio(u16 io_base) { u32 data0, data1; int i; - printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", - io_base ); - printk( "TLAN: Off. +0 +4\n" ); - for ( i = 0; i < 0x4C; i+= 8 ) { - data0 = TLan_DioRead32( io_base, i ); - data1 = TLan_DioRead32( io_base, i + 0x4 ); - printk( "TLAN: 0x%02x 0x%08x 0x%08x\n", i, data0, data1 ); + pr_info("Contents of internal registers for io base 0x%04hx\n", + io_base); + pr_info("Off. +0 +4\n"); + for (i = 0; i < 0x4C; i += 8) { + data0 = tlan_dio_read32(io_base, i); + data1 = tlan_dio_read32(io_base, i + 0x4); + pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1); } -} /* TLan_PrintDio */ +} - /*************************************************************** - * TLan_PrintList - * - * Returns: - * Nothing - * Parms: - * list A pointer to the TLanList structure to - * be printed. - * type A string to designate type of list, - * "Rx" or "Tx". - * num The index of the list. - * - * This function prints out the contents of the list - * pointed to by the list parameter. - * - **************************************************************/ +/*************************************************************** + * TLan_PrintList + * + * Returns: + * Nothing + * Parms: + * list A pointer to the struct tlan_list structure to + * be printed. + * type A string to designate type of list, + * "Rx" or "Tx". + * num The index of the list. + * + * This function prints out the contents of the list + * pointed to by the list parameter. 
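[Editorial note] The fields dumped by tlan_print_list() below, together with the initialisation in tlan_reset_lists() above, imply the shape of the shared RX/TX list descriptor. A hedged sketch of that layout for orientation; the authoritative definition lives in tlan.h, not in this hunk, and may differ in detail:

	/* hedged sketch of the descriptor implied by tlan_print_list()
	 * and tlan_reset_lists(); see tlan.h for the real definition */
	struct tlan_buffer_sketch {
		u32 count;	/* byte count; TLAN_LAST_BUFFER marks the last */
		u32 address;	/* DMA address of the data buffer */
	};

	struct tlan_list_sketch {
		u32 forward;		/* DMA address of the next list, 0 ends chain */
		u16 c_stat;		/* TLAN_CSTAT_* ownership/status bits */
		u16 frame_size;		/* filled in by the chip on receive */
		struct tlan_buffer_sketch buffer[10];	/* buffers 8/9 are reused
							 * to park the skb pointer */
	};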
+ * + **************************************************************/ -static void TLan_PrintList( TLanList *list, char *type, int num) +static void tlan_print_list(struct tlan_list *list, char *type, int num) { int i; - printk( "TLAN: %s List %d at %p\n", type, num, list ); - printk( "TLAN: Forward = 0x%08x\n", list->forward ); - printk( "TLAN: CSTAT = 0x%04hx\n", list->cStat ); - printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize ); - /* for ( i = 0; i < 10; i++ ) { */ - for ( i = 0; i < 2; i++ ) { - printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", - i, list->buffer[i].count, list->buffer[i].address ); + pr_info("%s List %d at %p\n", type, num, list); + pr_info(" Forward = 0x%08x\n", list->forward); + pr_info(" CSTAT = 0x%04hx\n", list->c_stat); + pr_info(" Frame Size = 0x%04hx\n", list->frame_size); + /* for (i = 0; i < 10; i++) { */ + for (i = 0; i < 2; i++) { + pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n", + i, list->buffer[i].count, list->buffer[i].address); } -} /* TLan_PrintList */ +} - /*************************************************************** - * TLan_ReadAndClearStats - * - * Returns: - * Nothing - * Parms: - * dev Pointer to device structure of adapter - * to which to read stats. - * record Flag indicating whether to add - * - * This functions reads all the internal status registers - * of the TLAN chip, which clears them as a side effect. - * It then either adds the values to the device's status - * struct, or discards them, depending on whether record - * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). - * - **************************************************************/ +/*************************************************************** + * tlan_read_and_clear_stats + * + * Returns: + * Nothing + * Parms: + * dev Pointer to device structure of adapter + * to which to read stats. + * record Flag indicating whether to add + * + * This functions reads all the internal status registers + * of the TLAN chip, which clears them as a side effect. + * It then either adds the values to the device's status + * struct, or discards them, depending on whether record + * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). 
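[Editorial note] The function below reads each statistics window one byte at a time through the DIO data port and reassembles the counters by shifting. A standalone illustration of that byte reassembly for the Good_TX_Frms window (the register values here are made up for the example):

	#include <stdint.h>
	#include <stdio.h>

	/* bytes +0..+2 form a 24-bit little-endian good-frame counter,
	 * byte +3 is the underrun count */
	int main(void)
	{
		uint8_t dio[4] = { 0x10, 0x02, 0x00, 0x03 };	/* hypothetical window */
		uint32_t tx_good, tx_under;

		tx_good  = dio[0];
		tx_good += dio[1] << 8;
		tx_good += dio[2] << 16;
		tx_under = dio[3];

		printf("tx_good=%u tx_under=%u\n", tx_good, tx_under);	/* 528 3 */
		return 0;
	}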
+ * + **************************************************************/ -static void TLan_ReadAndClearStats( struct net_device *dev, int record ) +static void tlan_read_and_clear_stats(struct net_device *dev, int record) { u32 tx_good, tx_under; u32 rx_good, rx_over; @@ -2129,41 +2072,42 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record ) u32 multi_col, single_col; u32 excess_col, late_col, loss; - outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR ); - tx_good = inb( dev->base_addr + TLAN_DIO_DATA ); - tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; - tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; - tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); - - outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR ); - rx_good = inb( dev->base_addr + TLAN_DIO_DATA ); - rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; - rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16; - rx_over = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); - - outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR ); - def_tx = inb( dev->base_addr + TLAN_DIO_DATA ); - def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; - crc = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); - code = inb( dev->base_addr + TLAN_DIO_DATA + 3 ); - - outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); - multi_col = inb( dev->base_addr + TLAN_DIO_DATA ); - multi_col += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8; - single_col = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); - single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8; - - outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR ); - excess_col = inb( dev->base_addr + TLAN_DIO_DATA ); - late_col = inb( dev->base_addr + TLAN_DIO_DATA + 1 ); - loss = inb( dev->base_addr + TLAN_DIO_DATA + 2 ); - - if ( record ) { + outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR); + tx_good = inb(dev->base_addr + TLAN_DIO_DATA); + tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; + tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16; + tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3); + + outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR); + rx_good = inb(dev->base_addr + TLAN_DIO_DATA); + rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; + rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16; + rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3); + + outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR); + def_tx = inb(dev->base_addr + TLAN_DIO_DATA); + def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; + crc = inb(dev->base_addr + TLAN_DIO_DATA + 2); + code = inb(dev->base_addr + TLAN_DIO_DATA + 3); + + outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR); + multi_col = inb(dev->base_addr + TLAN_DIO_DATA); + multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; + single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2); + single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8; + + outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR); + excess_col = inb(dev->base_addr + TLAN_DIO_DATA); + late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1); + loss = inb(dev->base_addr + TLAN_DIO_DATA + 2); + + if (record) { dev->stats.rx_packets += rx_good; dev->stats.rx_errors += rx_over + crc + code; dev->stats.tx_packets += tx_good; dev->stats.tx_errors += tx_under + loss; - dev->stats.collisions += multi_col + single_col + excess_col + late_col; + dev->stats.collisions += multi_col + + single_col + excess_col + late_col; dev->stats.rx_over_errors += rx_over; dev->stats.rx_crc_errors += crc; 
@@ -2173,39 +2117,39 @@ static void TLan_ReadAndClearStats( struct net_device *dev, int record ) dev->stats.tx_carrier_errors += loss; } -} /* TLan_ReadAndClearStats */ +} - /*************************************************************** - * TLan_Reset - * - * Returns: - * 0 - * Parms: - * dev Pointer to device structure of adapter - * to be reset. - * - * This function resets the adapter and it's physical - * device. See Chap. 3, pp. 9-10 of the "ThunderLAN - * Programmer's Guide" for details. The routine tries to - * implement what is detailed there, though adjustments - * have been made. - * - **************************************************************/ +/*************************************************************** + * TLan_Reset + * + * Returns: + * 0 + * Parms: + * dev Pointer to device structure of adapter + * to be reset. + * + * This function resets the adapter and it's physical + * device. See Chap. 3, pp. 9-10 of the "ThunderLAN + * Programmer's Guide" for details. The routine tries to + * implement what is detailed there, though adjustments + * have been made. + * + **************************************************************/ static void -TLan_ResetAdapter( struct net_device *dev ) +tlan_reset_adapter(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); int i; u32 addr; u32 data; u8 data8; - priv->tlanFullDuplex = false; - priv->phyOnline=0; + priv->tlan_full_duplex = false; + priv->phy_online = 0; netif_carrier_off(dev); /* 1. Assert reset bit. */ @@ -2216,7 +2160,7 @@ TLan_ResetAdapter( struct net_device *dev ) udelay(1000); -/* 2. Turn off interrupts. ( Probably isn't necessary ) */ +/* 2. Turn off interrupts. (Probably isn't necessary) */ data = inl(dev->base_addr + TLAN_HOST_CMD); data |= TLAN_HC_INT_OFF; @@ -2224,207 +2168,204 @@ TLan_ResetAdapter( struct net_device *dev ) /* 3. Clear AREGs and HASHs. */ - for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) { - TLan_DioWrite32( dev->base_addr, (u16) i, 0 ); - } + for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4) + tlan_dio_write32(dev->base_addr, (u16) i, 0); /* 4. Setup NetConfig register. */ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; - TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data ); + tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data); /* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */ - outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD ); - outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD ); + outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD); + outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD); /* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */ - outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR ); + outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; - TLan_SetBit( TLAN_NET_SIO_NMRST, addr ); + tlan_set_bit(TLAN_NET_SIO_NMRST, addr); /* 7. Setup the remaining registers. 
*/ - if ( priv->tlanRev >= 0x30 ) { + if (priv->tlan_rev >= 0x30) { data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC; - TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 ); + tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8); } - TLan_PhyDetect( dev ); + tlan_phy_detect(dev); data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN; - if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) { + if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) { data |= TLAN_NET_CFG_BIT; - if ( priv->aui == 1 ) { - TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a ); - } else if ( priv->duplex == TLAN_DUPLEX_FULL ) { - TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 ); - priv->tlanFullDuplex = true; + if (priv->aui == 1) { + tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a); + } else if (priv->duplex == TLAN_DUPLEX_FULL) { + tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00); + priv->tlan_full_duplex = true; } else { - TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 ); + tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08); } } - if ( priv->phyNum == 0 ) { + if (priv->phy_num == 0) data |= TLAN_NET_CFG_PHY_EN; - } - TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data ); + tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data); - if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { - TLan_FinishReset( dev ); - } else { - TLan_PhyPowerDown( dev ); - } + if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) + tlan_finish_reset(dev); + else + tlan_phy_power_down(dev); -} /* TLan_ResetAdapter */ +} static void -TLan_FinishReset( struct net_device *dev ) +tlan_finish_reset(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u8 data; u32 phy; u8 sio; u16 status; u16 partner; u16 tlphy_ctl; - u16 tlphy_par; + u16 tlphy_par; u16 tlphy_id1, tlphy_id2; - int i; + int i; - phy = priv->phy[priv->phyNum]; + phy = priv->phy[priv->phy_num]; data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP; - if ( priv->tlanFullDuplex ) { + if (priv->tlan_full_duplex) data |= TLAN_NET_CMD_DUPLEX; - } - TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data ); + tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data); data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5; - if ( priv->phyNum == 0 ) { + if (priv->phy_num == 0) data |= TLAN_NET_MASK_MASK7; - } - TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data ); - TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 ); - TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 ); - TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 ); + tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data); + tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7); + tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1); + tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2); - if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || - ( priv->aui ) ) { + if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) || + (priv->aui)) { status = MII_GS_LINK; - printk( "TLAN: %s: Link forced.\n", dev->name ); + netdev_info(dev, "Link forced\n"); } else { - TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); - udelay( 1000 ); - TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); - if ( (status & MII_GS_LINK) && - /* We only support link info on Nat.Sem. 
PHY's */ - (tlphy_id1 == NAT_SEM_ID1) && - (tlphy_id2 == NAT_SEM_ID2) ) { - TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner ); - TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par ); - - printk( "TLAN: %s: Link active with ", dev->name ); - if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { - printk( "forced 10%sMbps %s-Duplex\n", - tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", - tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); - } else { - printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n", - tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", - tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); - printk("TLAN: Partner capability: "); - for (i = 5; i <= 10; i++) - if (partner & (1<<i)) - printk("%s",media[i-5]); - printk("\n"); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + udelay(1000); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + if ((status & MII_GS_LINK) && + /* We only support link info on Nat.Sem. PHY's */ + (tlphy_id1 == NAT_SEM_ID1) && + (tlphy_id2 == NAT_SEM_ID2)) { + tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner); + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par); + + netdev_info(dev, + "Link active with %s %uMbps %s-Duplex\n", + !(tlphy_par & TLAN_PHY_AN_EN_STAT) + ? "forced" : "Autonegotiation enabled,", + tlphy_par & TLAN_PHY_SPEED_100 + ? 100 : 10, + tlphy_par & TLAN_PHY_DUPLEX_FULL + ? "Full" : "Half"); + + if (tlphy_par & TLAN_PHY_AN_EN_STAT) { + netdev_info(dev, "Partner capability:"); + for (i = 5; i < 10; i++) + if (partner & (1 << i)) + pr_cont(" %s", media[i-5]); + pr_cont("\n"); } - TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); + tlan_dio_write8(dev->base_addr, TLAN_LED_REG, + TLAN_LED_LINK); #ifdef MONITOR /* We have link beat..for now anyway */ - priv->link = 1; - /*Enabling link beat monitoring */ - TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT ); + priv->link = 1; + /*Enabling link beat monitoring */ + tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT); #endif } else if (status & MII_GS_LINK) { - printk( "TLAN: %s: Link active\n", dev->name ); - TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); + netdev_info(dev, "Link active\n"); + tlan_dio_write8(dev->base_addr, TLAN_LED_REG, + TLAN_LED_LINK); } } - if ( priv->phyNum == 0 ) { - TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); - tlphy_ctl |= TLAN_TC_INTEN; - TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl ); - sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO ); - sio |= TLAN_NET_SIO_MINTEN; - TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio ); - } - - if ( status & MII_GS_LINK ) { - TLan_SetMac( dev, 0, dev->dev_addr ); - priv->phyOnline = 1; - outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); - if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) { - outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 ); - } - outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM ); - outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); + if (priv->phy_num == 0) { + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); + tlphy_ctl |= TLAN_TC_INTEN; + tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); + sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO); + sio |= TLAN_NET_SIO_MINTEN; + tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio); + } + + if (status & MII_GS_LINK) { + tlan_set_mac(dev, 0, dev->dev_addr); + priv->phy_online = 1; + outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1); + if (debug >= 1 && debug != TLAN_DEBUG_PROBE) + outb((TLAN_HC_REQ_INT >> 8), + dev->base_addr + TLAN_HOST_CMD 
+ 1); + outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM); + outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD); netif_carrier_on(dev); } else { - printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", - dev->name ); - TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET ); + netdev_info(dev, "Link inactive, will retry in 10 secs...\n"); + tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET); return; } - TLan_SetMulticastList(dev); + tlan_set_multicast_list(dev); -} /* TLan_FinishReset */ +} - /*************************************************************** - * TLan_SetMac - * - * Returns: - * Nothing - * Parms: - * dev Pointer to device structure of adapter - * on which to change the AREG. - * areg The AREG to set the address in (0 - 3). - * mac A pointer to an array of chars. Each - * element stores one byte of the address. - * IE, it isn't in ascii. - * - * This function transfers a MAC address to one of the - * TLAN AREGs (address registers). The TLAN chip locks - * the register on writing to offset 0 and unlocks the - * register after writing to offset 5. If NULL is passed - * in mac, then the AREG is filled with 0's. - * - **************************************************************/ +/*************************************************************** + * tlan_set_mac + * + * Returns: + * Nothing + * Parms: + * dev Pointer to device structure of adapter + * on which to change the AREG. + * areg The AREG to set the address in (0 - 3). + * mac A pointer to an array of chars. Each + * element stores one byte of the address. + * IE, it isn't in ascii. + * + * This function transfers a MAC address to one of the + * TLAN AREGs (address registers). The TLAN chip locks + * the register on writing to offset 0 and unlocks the + * register after writing to offset 5. If NULL is passed + * in mac, then the AREG is filled with 0's. + * + **************************************************************/ -static void TLan_SetMac( struct net_device *dev, int areg, char *mac ) +static void tlan_set_mac(struct net_device *dev, int areg, char *mac) { int i; areg *= 6; - if ( mac != NULL ) { - for ( i = 0; i < 6; i++ ) - TLan_DioWrite8( dev->base_addr, - TLAN_AREG_0 + areg + i, mac[i] ); + if (mac != NULL) { + for (i = 0; i < 6; i++) + tlan_dio_write8(dev->base_addr, + TLAN_AREG_0 + areg + i, mac[i]); } else { - for ( i = 0; i < 6; i++ ) - TLan_DioWrite8( dev->base_addr, - TLAN_AREG_0 + areg + i, 0 ); + for (i = 0; i < 6; i++) + tlan_dio_write8(dev->base_addr, + TLAN_AREG_0 + areg + i, 0); } -} /* TLan_SetMac */ +} @@ -2432,205 +2373,199 @@ static void TLan_SetMac( struct net_device *dev, int areg, char *mac ) /***************************************************************************** ****************************************************************************** - ThunderLAN Driver PHY Layer Routines +ThunderLAN driver PHY layer routines ****************************************************************************** *****************************************************************************/ - /********************************************************************* - * TLan_PhyPrint - * - * Returns: - * Nothing - * Parms: - * dev A pointer to the device structure of the - * TLAN device having the PHYs to be detailed. - * - * This function prints the registers a PHY (aka transceiver). 
- * - ********************************************************************/ +/********************************************************************* + * tlan_phy_print + * + * Returns: + * Nothing + * Parms: + * dev A pointer to the device structure of the + * TLAN device having the PHYs to be detailed. + * + * This function prints the registers a PHY (aka transceiver). + * + ********************************************************************/ -static void TLan_PhyPrint( struct net_device *dev ) +static void tlan_phy_print(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 i, data0, data1, data2, data3, phy; - phy = priv->phy[priv->phyNum]; - - if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { - printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name ); - } else if ( phy <= TLAN_PHY_MAX_ADDR ) { - printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy ); - printk( "TLAN: Off. +0 +1 +2 +3\n" ); - for ( i = 0; i < 0x20; i+= 4 ) { - printk( "TLAN: 0x%02x", i ); - TLan_MiiReadReg( dev, phy, i, &data0 ); - printk( " 0x%04hx", data0 ); - TLan_MiiReadReg( dev, phy, i + 1, &data1 ); - printk( " 0x%04hx", data1 ); - TLan_MiiReadReg( dev, phy, i + 2, &data2 ); - printk( " 0x%04hx", data2 ); - TLan_MiiReadReg( dev, phy, i + 3, &data3 ); - printk( " 0x%04hx\n", data3 ); + phy = priv->phy[priv->phy_num]; + + if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { + netdev_info(dev, "Unmanaged PHY\n"); + } else if (phy <= TLAN_PHY_MAX_ADDR) { + netdev_info(dev, "PHY 0x%02x\n", phy); + pr_info(" Off. +0 +1 +2 +3\n"); + for (i = 0; i < 0x20; i += 4) { + tlan_mii_read_reg(dev, phy, i, &data0); + tlan_mii_read_reg(dev, phy, i + 1, &data1); + tlan_mii_read_reg(dev, phy, i + 2, &data2); + tlan_mii_read_reg(dev, phy, i + 3, &data3); + pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n", + i, data0, data1, data2, data3); } } else { - printk( "TLAN: Device %s, Invalid PHY.\n", dev->name ); + netdev_info(dev, "Invalid PHY\n"); } -} /* TLan_PhyPrint */ +} - /********************************************************************* - * TLan_PhyDetect - * - * Returns: - * Nothing - * Parms: - * dev A pointer to the device structure of the adapter - * for which the PHY needs determined. - * - * So far I've found that adapters which have external PHYs - * may also use the internal PHY for part of the functionality. - * (eg, AUI/Thinnet). This function finds out if this TLAN - * chip has an internal PHY, and then finds the first external - * PHY (starting from address 0) if it exists). - * - ********************************************************************/ +/********************************************************************* + * tlan_phy_detect + * + * Returns: + * Nothing + * Parms: + * dev A pointer to the device structure of the adapter + * for which the PHY needs determined. + * + * So far I've found that adapters which have external PHYs + * may also use the internal PHY for part of the functionality. + * (eg, AUI/Thinnet). This function finds out if this TLAN + * chip has an internal PHY, and then finds the first external + * PHY (starting from address 0) if it exists). 
+ * + ********************************************************************/ -static void TLan_PhyDetect( struct net_device *dev ) +static void tlan_phy_detect(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 control; u16 hi; u16 lo; u32 phy; - if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) { - priv->phyNum = 0xFFFF; + if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { + priv->phy_num = 0xffff; return; } - TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi ); + tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi); - if ( hi != 0xFFFF ) { + if (hi != 0xffff) priv->phy[0] = TLAN_PHY_MAX_ADDR; - } else { + else priv->phy[0] = TLAN_PHY_NONE; - } priv->phy[1] = TLAN_PHY_NONE; - for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) { - TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control ); - TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi ); - TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo ); - if ( ( control != 0xFFFF ) || - ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { - TLAN_DBG( TLAN_DEBUG_GNRL, - "PHY found at %02x %04x %04x %04x\n", - phy, control, hi, lo ); - if ( ( priv->phy[1] == TLAN_PHY_NONE ) && - ( phy != TLAN_PHY_MAX_ADDR ) ) { + for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) { + tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control); + tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi); + tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo); + if ((control != 0xffff) || + (hi != 0xffff) || (lo != 0xffff)) { + TLAN_DBG(TLAN_DEBUG_GNRL, + "PHY found at %02x %04x %04x %04x\n", + phy, control, hi, lo); + if ((priv->phy[1] == TLAN_PHY_NONE) && + (phy != TLAN_PHY_MAX_ADDR)) { priv->phy[1] = phy; } } } - if ( priv->phy[1] != TLAN_PHY_NONE ) { - priv->phyNum = 1; - } else if ( priv->phy[0] != TLAN_PHY_NONE ) { - priv->phyNum = 0; - } else { - printk( "TLAN: Cannot initialize device, no PHY was found!\n" ); - } + if (priv->phy[1] != TLAN_PHY_NONE) + priv->phy_num = 1; + else if (priv->phy[0] != TLAN_PHY_NONE) + priv->phy_num = 0; + else + netdev_info(dev, "Cannot initialize device, no PHY was found!\n"); -} /* TLan_PhyDetect */ +} -static void TLan_PhyPowerDown( struct net_device *dev ) +static void tlan_phy_power_down(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 value; - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name ); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name); value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; - TLan_MiiSync( dev->base_addr ); - TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); - if ( ( priv->phyNum == 0 ) && - ( priv->phy[1] != TLAN_PHY_NONE ) && - ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { - TLan_MiiSync( dev->base_addr ); - TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value ); + tlan_mii_sync(dev->base_addr); + tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value); + if ((priv->phy_num == 0) && + (priv->phy[1] != TLAN_PHY_NONE) && + (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) { + tlan_mii_sync(dev->base_addr); + tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value); } /* Wait for 50 ms and powerup * This is abitrary. It is intended to make sure the * transceiver settles. 
*/ - TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP ); + tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP); -} /* TLan_PhyPowerDown */ +} -static void TLan_PhyPowerUp( struct net_device *dev ) +static void tlan_phy_power_up(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 value; - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name ); - TLan_MiiSync( dev->base_addr ); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name); + tlan_mii_sync(dev->base_addr); value = MII_GC_LOOPBK; - TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); - TLan_MiiSync(dev->base_addr); + tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value); + tlan_mii_sync(dev->base_addr); /* Wait for 500 ms and reset the * transceiver. The TLAN docs say both 50 ms and * 500 ms, so do the longer, just in case. */ - TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET ); + tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET); -} /* TLan_PhyPowerUp */ +} -static void TLan_PhyReset( struct net_device *dev ) +static void tlan_phy_reset(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 phy; u16 value; - phy = priv->phy[priv->phyNum]; + phy = priv->phy[priv->phy_num]; - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name ); - TLan_MiiSync( dev->base_addr ); + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name); + tlan_mii_sync(dev->base_addr); value = MII_GC_LOOPBK | MII_GC_RESET; - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value ); - TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); - while ( value & MII_GC_RESET ) { - TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value ); - } + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value); + tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value); + while (value & MII_GC_RESET) + tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value); /* Wait for 500 ms and initialize. * I don't remember why I wait this long. * I've changed this to 50ms, as it seems long enough. */ - TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK ); + tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK); -} /* TLan_PhyReset */ +} -static void TLan_PhyStartLink( struct net_device *dev ) +static void tlan_phy_start_link(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 ability; u16 control; u16 data; @@ -2638,86 +2573,87 @@ static void TLan_PhyStartLink( struct net_device *dev ) u16 status; u16 tctl; - phy = priv->phy[priv->phyNum]; - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name ); - TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); - TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability ); + phy = priv->phy[priv->phy_num]; + TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability); - if ( ( status & MII_GS_AUTONEG ) && - ( ! 
priv->aui ) ) { + if ((status & MII_GS_AUTONEG) && + (!priv->aui)) { ability = status >> 11; - if ( priv->speed == TLAN_SPEED_10 && - priv->duplex == TLAN_DUPLEX_HALF) { - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000); - } else if ( priv->speed == TLAN_SPEED_10 && - priv->duplex == TLAN_DUPLEX_FULL) { - priv->tlanFullDuplex = true; - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100); - } else if ( priv->speed == TLAN_SPEED_100 && - priv->duplex == TLAN_DUPLEX_HALF) { - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000); - } else if ( priv->speed == TLAN_SPEED_100 && - priv->duplex == TLAN_DUPLEX_FULL) { - priv->tlanFullDuplex = true; - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100); + if (priv->speed == TLAN_SPEED_10 && + priv->duplex == TLAN_DUPLEX_HALF) { + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000); + } else if (priv->speed == TLAN_SPEED_10 && + priv->duplex == TLAN_DUPLEX_FULL) { + priv->tlan_full_duplex = true; + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100); + } else if (priv->speed == TLAN_SPEED_100 && + priv->duplex == TLAN_DUPLEX_HALF) { + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000); + } else if (priv->speed == TLAN_SPEED_100 && + priv->duplex == TLAN_DUPLEX_FULL) { + priv->tlan_full_duplex = true; + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100); } else { /* Set Auto-Neg advertisement */ - TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1); + tlan_mii_write_reg(dev, phy, MII_AN_ADV, + (ability << 5) | 1); /* Enablee Auto-Neg */ - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 ); + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000); /* Restart Auto-Neg */ - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 ); + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200); /* Wait for 4 sec for autonegotiation - * to complete. The max spec time is less than this - * but the card need additional time to start AN. - * .5 sec should be plenty extra. - */ - printk( "TLAN: %s: Starting autonegotiation.\n", dev->name ); - TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN ); + * to complete. The max spec time is less than this + * but the card need additional time to start AN. + * .5 sec should be plenty extra. 
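[Editorial note] The bare MII_GEN_CTL constants written in this function (0x0000, 0x0100, 0x2000, 0x2100, 0x1000, 0x1200) are combinations of the standard IEEE 802.3 clause 22 basic-mode control bits. A standalone decoding, using generic bit names rather than the driver's MII_GC_* macros:

	#include <stdio.h>

	#define BMCR_FULLDPLX	0x0100	/* full duplex */
	#define BMCR_ANRESTART	0x0200	/* restart autonegotiation */
	#define BMCR_ANENABLE	0x1000	/* enable autonegotiation */
	#define BMCR_SPEED100	0x2000	/* select 100 Mbit/s */

	int main(void)
	{
		printf("forced 10/half  = 0x%04x\n", 0);
		printf("forced 10/full  = 0x%04x\n", BMCR_FULLDPLX);
		printf("forced 100/half = 0x%04x\n", BMCR_SPEED100);
		printf("forced 100/full = 0x%04x\n", BMCR_SPEED100 | BMCR_FULLDPLX);
		printf("autoneg enable  = 0x%04x\n", BMCR_ANENABLE);
		printf("autoneg restart = 0x%04x\n", BMCR_ANENABLE | BMCR_ANRESTART);
		return 0;
	}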
+ */ + netdev_info(dev, "Starting autonegotiation\n"); + tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN); return; } } - if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) { - priv->phyNum = 0; - data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; - TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); - TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN ); + if ((priv->aui) && (priv->phy_num != 0)) { + priv->phy_num = 0; + data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN + | TLAN_NET_CFG_PHY_EN; + tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); + tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN); return; - } else if ( priv->phyNum == 0 ) { + } else if (priv->phy_num == 0) { control = 0; - TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl ); - if ( priv->aui ) { - tctl |= TLAN_TC_AUISEL; + tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl); + if (priv->aui) { + tctl |= TLAN_TC_AUISEL; } else { - tctl &= ~TLAN_TC_AUISEL; - if ( priv->duplex == TLAN_DUPLEX_FULL ) { + tctl &= ~TLAN_TC_AUISEL; + if (priv->duplex == TLAN_DUPLEX_FULL) { control |= MII_GC_DUPLEX; - priv->tlanFullDuplex = true; + priv->tlan_full_duplex = true; } - if ( priv->speed == TLAN_SPEED_100 ) { + if (priv->speed == TLAN_SPEED_100) control |= MII_GC_SPEEDSEL; - } } - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control ); - TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl ); + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control); + tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl); } /* Wait for 2 sec to give the transceiver time * to establish link. */ - TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET ); + tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET); -} /* TLan_PhyStartLink */ +} -static void TLan_PhyFinishAutoNeg( struct net_device *dev ) +static void tlan_phy_finish_auto_neg(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 an_adv; u16 an_lpa; u16 data; @@ -2725,115 +2661,118 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev ) u16 phy; u16 status; - phy = priv->phy[priv->phyNum]; + phy = priv->phy[priv->phy_num]; - TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); - udelay( 1000 ); - TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); + udelay(1000); + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); - if ( ! ( status & MII_GS_AUTOCMPLT ) ) { + if (!(status & MII_GS_AUTOCMPLT)) { /* Wait for 8 sec to give the process * more time. Perhaps we should fail after a while. 
*/ - if (!priv->neg_be_verbose++) { - pr_info("TLAN: Giving autonegotiation more time.\n"); - pr_info("TLAN: Please check that your adapter has\n"); - pr_info("TLAN: been properly connected to a HUB or Switch.\n"); - pr_info("TLAN: Trying to establish link in the background...\n"); - } - TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN ); + if (!priv->neg_be_verbose++) { + pr_info("Giving autonegotiation more time.\n"); + pr_info("Please check that your adapter has\n"); + pr_info("been properly connected to a HUB or Switch.\n"); + pr_info("Trying to establish link in the background...\n"); + } + tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN); return; } - printk( "TLAN: %s: Autonegotiation complete.\n", dev->name ); - TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv ); - TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa ); + netdev_info(dev, "Autonegotiation complete\n"); + tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv); + tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa); mode = an_adv & an_lpa & 0x03E0; - if ( mode & 0x0100 ) { - priv->tlanFullDuplex = true; - } else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) { - priv->tlanFullDuplex = true; - } - - if ( ( ! ( mode & 0x0180 ) ) && - ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && - ( priv->phyNum != 0 ) ) { - priv->phyNum = 0; - data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; - TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); - TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN ); + if (mode & 0x0100) + priv->tlan_full_duplex = true; + else if (!(mode & 0x0080) && (mode & 0x0040)) + priv->tlan_full_duplex = true; + + if ((!(mode & 0x0180)) && + (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) && + (priv->phy_num != 0)) { + priv->phy_num = 0; + data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN + | TLAN_NET_CFG_PHY_EN; + tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); + tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN); return; } - if ( priv->phyNum == 0 ) { - if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || - ( an_adv & an_lpa & 0x0040 ) ) { - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, - MII_GC_AUTOENB | MII_GC_DUPLEX ); - pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" ); + if (priv->phy_num == 0) { + if ((priv->duplex == TLAN_DUPLEX_FULL) || + (an_adv & an_lpa & 0x0040)) { + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, + MII_GC_AUTOENB | MII_GC_DUPLEX); + netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n"); } else { - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB ); - pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); + tlan_mii_write_reg(dev, phy, MII_GEN_CTL, + MII_GC_AUTOENB); + netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n"); } } /* Wait for 100 ms. No reason in partiticular. */ - TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET ); + tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET); -} /* TLan_PhyFinishAutoNeg */ +} #ifdef MONITOR - /********************************************************************* - * - * TLan_phyMonitor - * - * Returns: - * None - * - * Params: - * dev The device structure of this device. - * - * - * This function monitors PHY condition by reading the status - * register via the MII bus. This can be used to give info - * about link changes (up/down), and possible switch to alternate - * media. 
- * - * ******************************************************************/ - -void TLan_PhyMonitor( struct net_device *dev ) +/********************************************************************* + * + * tlan_phy_monitor + * + * Returns: + * None + * + * Params: + * dev The device structure of this device. + * + * + * This function monitors PHY condition by reading the status + * register via the MII bus. This can be used to give info + * about link changes (up/down), and possible switch to alternate + * media. + * + *******************************************************************/ + +void tlan_phy_monitor(struct net_device *dev) { - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); u16 phy; u16 phy_status; - phy = priv->phy[priv->phyNum]; + phy = priv->phy[priv->phy_num]; - /* Get PHY status register */ - TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status ); + /* Get PHY status register */ + tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status); - /* Check if link has been lost */ - if (!(phy_status & MII_GS_LINK)) { - if (priv->link) { - priv->link = 0; - printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name); - netif_carrier_off(dev); - TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); - return; + /* Check if link has been lost */ + if (!(phy_status & MII_GS_LINK)) { + if (priv->link) { + priv->link = 0; + printk(KERN_DEBUG "TLAN: %s has lost link\n", + dev->name); + netif_carrier_off(dev); + tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT); + return; } } - /* Link restablished? */ - if ((phy_status & MII_GS_LINK) && !priv->link) { - priv->link = 1; - printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name); + /* Link restablished? */ + if ((phy_status & MII_GS_LINK) && !priv->link) { + priv->link = 1; + printk(KERN_DEBUG "TLAN: %s has reestablished link\n", + dev->name); netif_carrier_on(dev); - } + } /* Setup a new monitor */ - TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT ); + tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT); } #endif /* MONITOR */ @@ -2842,47 +2781,48 @@ void TLan_PhyMonitor( struct net_device *dev ) /***************************************************************************** ****************************************************************************** - ThunderLAN Driver MII Routines +ThunderLAN driver MII routines - These routines are based on the information in Chap. 2 of the - "ThunderLAN Programmer's Guide", pp. 15-24. +these routines are based on the information in chap. 2 of the +"ThunderLAN Programmer's Guide", pp. 15-24. ****************************************************************************** *****************************************************************************/ - /*************************************************************** - * TLan_MiiReadReg - * - * Returns: - * false if ack received ok - * true if no ack received or other error - * - * Parms: - * dev The device structure containing - * The io address and interrupt count - * for this device. - * phy The address of the PHY to be queried. - * reg The register whose contents are to be - * retrieved. - * val A pointer to a variable to store the - * retrieved value. - * - * This function uses the TLAN's MII bus to retrieve the contents - * of a given register on a PHY. It sends the appropriate info - * and then reads the 16-bit register value from the MII bus via - * the TLAN SIO register. 
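[Editorial note] What the read routine clocks out over NetSio is an ordinary clause 22 MII management frame: start (01b), opcode (10b read, 01b write), a 5-bit PHY address, a 5-bit register address, a turnaround, then 16 data bits. A standalone sketch of the header packing, independent of the driver's bit-banging helpers:

	#include <stdint.h>
	#include <stdio.h>

	/* packs the 14 bits clocked out before the turnaround; on a read
	 * the bus direction reverses there and the PHY drives the data */
	static uint16_t mii_frame_header(int read, unsigned phy, unsigned reg)
	{
		uint16_t hdr = 0;

		hdr |= 0x1 << 12;			/* start: 01b */
		hdr |= (read ? 0x2 : 0x1) << 10;	/* opcode */
		hdr |= (phy & 0x1f) << 5;		/* PHY address */
		hdr |= (reg & 0x1f);			/* register address */
		return hdr;
	}

	int main(void)
	{
		printf("read  phy 0x1f reg 1: 0x%04x\n", mii_frame_header(1, 0x1f, 1));
		printf("write phy 0x1f reg 0: 0x%04x\n", mii_frame_header(0, 0x1f, 0));
		return 0;
	}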
- * - **************************************************************/ - -static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val ) +/*************************************************************** + * tlan_mii_read_reg + * + * Returns: + * false if ack received ok + * true if no ack received or other error + * + * Parms: + * dev The device structure containing + * The io address and interrupt count + * for this device. + * phy The address of the PHY to be queried. + * reg The register whose contents are to be + * retrieved. + * val A pointer to a variable to store the + * retrieved value. + * + * This function uses the TLAN's MII bus to retrieve the contents + * of a given register on a PHY. It sends the appropriate info + * and then reads the 16-bit register value from the MII bus via + * the TLAN SIO register. + * + **************************************************************/ + +static bool +tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) { u8 nack; u16 sio, tmp; - u32 i; + u32 i; bool err; int minten; - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); unsigned long flags = 0; err = false; @@ -2892,48 +2832,48 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val if (!in_irq()) spin_lock_irqsave(&priv->lock, flags); - TLan_MiiSync(dev->base_addr); + tlan_mii_sync(dev->base_addr); - minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); - if ( minten ) - TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio); + minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); + if (minten) + tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio); - TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ - TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Read ( 10b ) */ - TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ - TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ + tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */ + tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */ + tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */ + tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */ - TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio); /* Change direction */ + tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */ - TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Clock Idle bit */ - TLan_SetBit(TLAN_NET_SIO_MCLK, sio); - TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Wait 300ns */ + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */ + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */ - nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio); /* Check for ACK */ - TLan_SetBit(TLAN_NET_SIO_MCLK, sio); /* Finish ACK */ - if (nack) { /* No ACK, so fake it */ + nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */ + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */ + if (nack) { /* no ACK, so fake it */ for (i = 0; i < 16; i++) { - TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); - TLan_SetBit(TLAN_NET_SIO_MCLK, sio); + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); } tmp = 0xffff; err = true; } else { /* ACK, so read data */ for (tmp = 0, i = 0x8000; i; i >>= 1) { - TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); - if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio)) + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); + if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio)) tmp |= i; - TLan_SetBit(TLAN_NET_SIO_MCLK, sio); + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); } } - TLan_ClearBit(TLAN_NET_SIO_MCLK, sio); /* Idle cycle */ - 
TLan_SetBit(TLAN_NET_SIO_MCLK, sio); + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */ + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); - if ( minten ) - TLan_SetBit(TLAN_NET_SIO_MINTEN, sio); + if (minten) + tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); *val = tmp; @@ -2942,116 +2882,117 @@ static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val return err; -} /* TLan_MiiReadReg */ +} - /*************************************************************** - * TLan_MiiSendData - * - * Returns: - * Nothing - * Parms: - * base_port The base IO port of the adapter in - * question. - * dev The address of the PHY to be queried. - * data The value to be placed on the MII bus. - * num_bits The number of bits in data that are to - * be placed on the MII bus. - * - * This function sends on sequence of bits on the MII - * configuration bus. - * - **************************************************************/ +/*************************************************************** + * tlan_mii_send_data + * + * Returns: + * Nothing + * Parms: + * base_port The base IO port of the adapter in + * question. + * dev The address of the PHY to be queried. + * data The value to be placed on the MII bus. + * num_bits The number of bits in data that are to + * be placed on the MII bus. + * + * This function sends on sequence of bits on the MII + * configuration bus. + * + **************************************************************/ -static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits ) +static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits) { u16 sio; u32 i; - if ( num_bits == 0 ) + if (num_bits == 0) return; - outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); + outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR); sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; - TLan_SetBit( TLAN_NET_SIO_MTXEN, sio ); + tlan_set_bit(TLAN_NET_SIO_MTXEN, sio); - for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) { - TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); - (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); - if ( data & i ) - TLan_SetBit( TLAN_NET_SIO_MDATA, sio ); + for (i = (0x1 << (num_bits - 1)); i; i >>= 1) { + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); + (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio); + if (data & i) + tlan_set_bit(TLAN_NET_SIO_MDATA, sio); else - TLan_ClearBit( TLAN_NET_SIO_MDATA, sio ); - TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); - (void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio ); + tlan_clear_bit(TLAN_NET_SIO_MDATA, sio); + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); + (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio); } -} /* TLan_MiiSendData */ +} - /*************************************************************** - * TLan_MiiSync - * - * Returns: - * Nothing - * Parms: - * base_port The base IO port of the adapter in - * question. - * - * This functions syncs all PHYs in terms of the MII configuration - * bus. - * - **************************************************************/ +/*************************************************************** + * TLan_MiiSync + * + * Returns: + * Nothing + * Parms: + * base_port The base IO port of the adapter in + * question. + * + * This functions syncs all PHYs in terms of the MII configuration + * bus. 
+ * + **************************************************************/ -static void TLan_MiiSync( u16 base_port ) +static void tlan_mii_sync(u16 base_port) { int i; u16 sio; - outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR ); + outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR); sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; - TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio ); - for ( i = 0; i < 32; i++ ) { - TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); - TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); + tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); + for (i = 0; i < 32; i++) { + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); } -} /* TLan_MiiSync */ +} - /*************************************************************** - * TLan_MiiWriteReg - * - * Returns: - * Nothing - * Parms: - * dev The device structure for the device - * to write to. - * phy The address of the PHY to be written to. - * reg The register whose contents are to be - * written. - * val The value to be written to the register. - * - * This function uses the TLAN's MII bus to write the contents of a - * given register on a PHY. It sends the appropriate info and then - * writes the 16-bit register value from the MII configuration bus - * via the TLAN SIO register. - * - **************************************************************/ +/*************************************************************** + * tlan_mii_write_reg + * + * Returns: + * Nothing + * Parms: + * dev The device structure for the device + * to write to. + * phy The address of the PHY to be written to. + * reg The register whose contents are to be + * written. + * val The value to be written to the register. + * + * This function uses the TLAN's MII bus to write the contents of a + * given register on a PHY. It sends the appropriate info and then + * writes the 16-bit register value from the MII configuration bus + * via the TLAN SIO register. 
+ * + **************************************************************/ -static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val ) +static void +tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) { u16 sio; int minten; unsigned long flags = 0; - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; @@ -3059,30 +3000,30 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val if (!in_irq()) spin_lock_irqsave(&priv->lock, flags); - TLan_MiiSync( dev->base_addr ); + tlan_mii_sync(dev->base_addr); - minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio ); - if ( minten ) - TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio ); + minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); + if (minten) + tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio); - TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Start ( 01b ) */ - TLan_MiiSendData( dev->base_addr, 0x1, 2 ); /* Write ( 01b ) */ - TLan_MiiSendData( dev->base_addr, phy, 5 ); /* Device # */ - TLan_MiiSendData( dev->base_addr, reg, 5 ); /* Register # */ + tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */ + tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */ + tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */ + tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */ - TLan_MiiSendData( dev->base_addr, 0x2, 2 ); /* Send ACK */ - TLan_MiiSendData( dev->base_addr, val, 16 ); /* Send Data */ + tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */ + tlan_mii_send_data(dev->base_addr, val, 16); /* send data */ - TLan_ClearBit( TLAN_NET_SIO_MCLK, sio ); /* Idle cycle */ - TLan_SetBit( TLAN_NET_SIO_MCLK, sio ); + tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */ + tlan_set_bit(TLAN_NET_SIO_MCLK, sio); - if ( minten ) - TLan_SetBit( TLAN_NET_SIO_MINTEN, sio ); + if (minten) + tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); if (!in_irq()) spin_unlock_irqrestore(&priv->lock, flags); -} /* TLan_MiiWriteReg */ +} @@ -3090,229 +3031,226 @@ static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val /***************************************************************************** ****************************************************************************** - ThunderLAN Driver Eeprom routines +ThunderLAN driver eeprom routines - The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A - EEPROM. These functions are based on information in Microchip's - data sheet. I don't know how well this functions will work with - other EEPROMs. +the Compaq netelligent 10 and 10/100 cards use a microchip 24C02A +EEPROM. these functions are based on information in microchip's +data sheet. I don't know how well this functions will work with +other Eeproms. ****************************************************************************** *****************************************************************************/ - /*************************************************************** - * TLan_EeSendStart - * - * Returns: - * Nothing - * Parms: - * io_base The IO port base address for the - * TLAN device with the EEPROM to - * use. - * - * This function sends a start cycle to an EEPROM attached - * to a TLAN chip. 
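[Editorial note] The "start cycle" below is the ordinary I2C start condition (the 24C02 is an I2C part), and the optional stop at the end of tlan_ee_send_byte() is the matching stop condition. A hedged fragment showing the two conditions with hypothetical scl()/sda() setters instead of tlan_set_bit()/tlan_clear_bit() on NetSio:

	/* hedged sketch: the two I2C bus conditions generated by the
	 * EEPROM helpers, with hypothetical line setters */
	static void i2c_start(void (*scl)(int), void (*sda)(int))
	{
		scl(1);
		sda(1);
		sda(0);		/* data falls while clock is high -> START */
		scl(0);
	}

	static void i2c_stop(void (*scl)(int), void (*sda)(int))
	{
		sda(0);
		scl(1);
		sda(1);		/* data rises while clock is high -> STOP */
	}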
- * - **************************************************************/ - -static void TLan_EeSendStart( u16 io_base ) +/*************************************************************** + * tlan_ee_send_start + * + * Returns: + * Nothing + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * + * This function sends a start cycle to an EEPROM attached + * to a TLAN chip. + * + **************************************************************/ + +static void tlan_ee_send_start(u16 io_base) { u16 sio; - outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); + outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; - TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); - TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); - TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); - TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); - TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); - -} /* TLan_EeSendStart */ - - - - - /*************************************************************** - * TLan_EeSendByte - * - * Returns: - * If the correct ack was received, 0, otherwise 1 - * Parms: io_base The IO port base address for the - * TLAN device with the EEPROM to - * use. - * data The 8 bits of information to - * send to the EEPROM. - * stop If TLAN_EEPROM_STOP is passed, a - * stop cycle is sent after the - * byte is sent after the ack is - * read. - * - * This function sends a byte on the serial EEPROM line, - * driving the clock to send each bit. The function then - * reverses transmission direction and reads an acknowledge - * bit. - * - **************************************************************/ - -static int TLan_EeSendByte( u16 io_base, u8 data, int stop ) + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); + tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + +} + + + + +/*************************************************************** + * tlan_ee_send_byte + * + * Returns: + * If the correct ack was received, 0, otherwise 1 + * Parms: io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * data The 8 bits of information to + * send to the EEPROM. + * stop If TLAN_EEPROM_STOP is passed, a + * stop cycle is sent after the + * byte is sent after the ack is + * read. + * + * This function sends a byte on the serial EEPROM line, + * driving the clock to send each bit. The function then + * reverses transmission direction and reads an acknowledge + * bit. 
+ * + **************************************************************/ + +static int tlan_ee_send_byte(u16 io_base, u8 data, int stop) { int err; u8 place; u16 sio; - outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); + outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; /* Assume clock is low, tx is enabled; */ - for ( place = 0x80; place != 0; place >>= 1 ) { - if ( place & data ) - TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); + for (place = 0x80; place != 0; place >>= 1) { + if (place & data) + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); else - TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); - TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); - TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); } - TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); - TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); - err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio ); - TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); - TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); + tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio); + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); - if ( ( ! err ) && stop ) { + if ((!err) && stop) { /* STOP, raise data while clock is high */ - TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); - TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); - TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); } return err; -} /* TLan_EeSendByte */ - - - - - /*************************************************************** - * TLan_EeReceiveByte - * - * Returns: - * Nothing - * Parms: - * io_base The IO port base address for the - * TLAN device with the EEPROM to - * use. - * data An address to a char to hold the - * data sent from the EEPROM. - * stop If TLAN_EEPROM_STOP is passed, a - * stop cycle is sent after the - * byte is received, and no ack is - * sent. - * - * This function receives 8 bits of data from the EEPROM - * over the serial link. It then sends and ack bit, or no - * ack and a stop bit. This function is used to retrieve - * data after the address of a byte in the EEPROM has been - * sent. - * - **************************************************************/ - -static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop ) +} + + + + +/*************************************************************** + * tlan_ee_receive_byte + * + * Returns: + * Nothing + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * data An address to a char to hold the + * data sent from the EEPROM. + * stop If TLAN_EEPROM_STOP is passed, a + * stop cycle is sent after the + * byte is received, and no ack is + * sent. + * + * This function receives 8 bits of data from the EEPROM + * over the serial link. It then sends and ack bit, or no + * ack and a stop bit. This function is used to retrieve + * data after the address of a byte in the EEPROM has been + * sent. 
+ * + **************************************************************/ + +static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop) { u8 place; u16 sio; - outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR ); + outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; *data = 0; /* Assume clock is low, tx is enabled; */ - TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio ); - for ( place = 0x80; place; place >>= 1 ) { - TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); - if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) ) + tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio); + for (place = 0x80; place; place >>= 1) { + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio)) *data |= place; - TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); } - TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); - if ( ! stop ) { - TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* Ack = 0 */ - TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); - TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); + tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); + if (!stop) { + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */ + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); } else { - TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ - TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); - TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */ + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); /* STOP, raise data while clock is high */ - TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); - TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); - TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); - } - -} /* TLan_EeReceiveByte */ - - - - - /*************************************************************** - * TLan_EeReadByte - * - * Returns: - * No error = 0, else, the stage at which the error - * occurred. - * Parms: - * io_base The IO port base address for the - * TLAN device with the EEPROM to - * use. - * ee_addr The address of the byte in the - * EEPROM whose contents are to be - * retrieved. - * data An address to a char to hold the - * data obtained from the EEPROM. - * - * This function reads a byte of information from an byte - * cell in the EEPROM. - * - **************************************************************/ - -static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data ) + tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); + tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); + tlan_set_bit(TLAN_NET_SIO_EDATA, sio); + } + +} + + + + +/*************************************************************** + * tlan_ee_read_byte + * + * Returns: + * No error = 0, else, the stage at which the error + * occurred. + * Parms: + * io_base The IO port base address for the + * TLAN device with the EEPROM to + * use. + * ee_addr The address of the byte in the + * EEPROM whose contents are to be + * retrieved. + * data An address to a char to hold the + * data obtained from the EEPROM. + * + * This function reads a byte of information from an byte + * cell in the EEPROM. 
+ * + **************************************************************/ + +static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data) { int err; - TLanPrivateInfo *priv = netdev_priv(dev); + struct tlan_priv *priv = netdev_priv(dev); unsigned long flags = 0; - int ret=0; + int ret = 0; spin_lock_irqsave(&priv->lock, flags); - TLan_EeSendStart( dev->base_addr ); - err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK ); - if (err) - { - ret=1; + tlan_ee_send_start(dev->base_addr); + err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK); + if (err) { + ret = 1; goto fail; } - err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK ); - if (err) - { - ret=2; + err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK); + if (err) { + ret = 2; goto fail; } - TLan_EeSendStart( dev->base_addr ); - err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK ); - if (err) - { - ret=3; + tlan_ee_send_start(dev->base_addr); + err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK); + if (err) { + ret = 3; goto fail; } - TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP ); + tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP); fail: spin_unlock_irqrestore(&priv->lock, flags); return ret; -} /* TLan_EeReadByte */ +} diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h index 3315ced774e2..5fc98a8e4889 100644 --- a/drivers/net/tlan.h +++ b/drivers/net/tlan.h @@ -20,8 +20,8 @@ ********************************************************************/ -#include <asm/io.h> -#include <asm/types.h> +#include <linux/io.h> +#include <linux/types.h> #include <linux/netdevice.h> @@ -40,8 +40,11 @@ #define TLAN_IGNORE 0 #define TLAN_RECORD 1 -#define TLAN_DBG(lvl, format, args...) \ - do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0) +#define TLAN_DBG(lvl, format, args...) 
\ + do { \ + if (debug&lvl) \ + printk(KERN_DEBUG "TLAN: " format, ##args); \ + } while (0) #define TLAN_DEBUG_GNRL 0x0001 #define TLAN_DEBUG_TX 0x0002 @@ -50,7 +53,8 @@ #define TLAN_DEBUG_PROBE 0x0010 #define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */ -#define MAX_TLAN_BOARDS 8 /* Max number of boards installed at a time */ +#define MAX_TLAN_BOARDS 8 /* Max number of boards installed + at a time */ /***************************************************************** @@ -70,13 +74,13 @@ #define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 #endif -typedef struct tlan_adapter_entry { - u16 vendorId; - u16 deviceId; - char *deviceLabel; +struct tlan_adapter_entry { + u16 vendor_id; + u16 device_id; + char *device_label; u32 flags; - u16 addrOfs; -} TLanAdapterEntry; + u16 addr_ofs; +}; #define TLAN_ADAPTER_NONE 0x00000000 #define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001 @@ -129,18 +133,18 @@ typedef struct tlan_adapter_entry { #define TLAN_CSTAT_DP_PR 0x0100 -typedef struct tlan_buffer_ref_tag { +struct tlan_buffer { u32 count; u32 address; -} TLanBufferRef; +}; -typedef struct tlan_list_tag { +struct tlan_list { u32 forward; - u16 cStat; - u16 frameSize; - TLanBufferRef buffer[TLAN_BUFFERS_PER_LIST]; -} TLanList; + u16 c_stat; + u16 frame_size; + struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST]; +}; typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; @@ -164,49 +168,49 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; * ****************************************************************/ -typedef struct tlan_private_tag { - struct net_device *nextDevice; - struct pci_dev *pciDev; +struct tlan_priv { + struct net_device *next_device; + struct pci_dev *pci_dev; struct net_device *dev; - void *dmaStorage; - dma_addr_t dmaStorageDMA; - unsigned int dmaSize; - u8 *padBuffer; - TLanList *rxList; - dma_addr_t rxListDMA; - u8 *rxBuffer; - dma_addr_t rxBufferDMA; - u32 rxHead; - u32 rxTail; - u32 rxEocCount; - TLanList *txList; - dma_addr_t txListDMA; - u8 *txBuffer; - dma_addr_t txBufferDMA; - u32 txHead; - u32 txInProgress; - u32 txTail; - u32 txBusyCount; - u32 phyOnline; - u32 timerSetAt; - u32 timerType; + void *dma_storage; + dma_addr_t dma_storage_dma; + unsigned int dma_size; + u8 *pad_buffer; + struct tlan_list *rx_list; + dma_addr_t rx_list_dma; + u8 *rx_buffer; + dma_addr_t rx_buffer_dma; + u32 rx_head; + u32 rx_tail; + u32 rx_eoc_count; + struct tlan_list *tx_list; + dma_addr_t tx_list_dma; + u8 *tx_buffer; + dma_addr_t tx_buffer_dma; + u32 tx_head; + u32 tx_in_progress; + u32 tx_tail; + u32 tx_busy_count; + u32 phy_online; + u32 timer_set_at; + u32 timer_type; struct timer_list timer; struct board *adapter; - u32 adapterRev; + u32 adapter_rev; u32 aui; u32 debug; u32 duplex; u32 phy[2]; - u32 phyNum; + u32 phy_num; u32 speed; - u8 tlanRev; - u8 tlanFullDuplex; + u8 tlan_rev; + u8 tlan_full_duplex; spinlock_t lock; u8 link; u8 is_eisa; struct work_struct tlan_tqueue; u8 neg_be_verbose; -} TLanPrivateInfo; +}; @@ -247,7 +251,7 @@ typedef struct tlan_private_tag { ****************************************************************/ #define TLAN_HOST_CMD 0x00 -#define TLAN_HC_GO 0x80000000 +#define TLAN_HC_GO 0x80000000 #define TLAN_HC_STOP 0x40000000 #define TLAN_HC_ACK 0x20000000 #define TLAN_HC_CS_MASK 0x1FE00000 @@ -283,7 +287,7 @@ typedef struct tlan_private_tag { #define TLAN_NET_CMD_TRFRAM 0x02 #define TLAN_NET_CMD_TXPACE 0x01 #define TLAN_NET_SIO 0x01 -#define TLAN_NET_SIO_MINTEN 0x80 +#define TLAN_NET_SIO_MINTEN 0x80 #define TLAN_NET_SIO_ECLOK 0x40 #define TLAN_NET_SIO_ETXEN 0x20 #define TLAN_NET_SIO_EDATA 
0x10 @@ -304,7 +308,7 @@ typedef struct tlan_private_tag { #define TLAN_NET_MASK_MASK4 0x10 #define TLAN_NET_MASK_RSRVD 0x0F #define TLAN_NET_CONFIG 0x04 -#define TLAN_NET_CFG_RCLK 0x8000 +#define TLAN_NET_CFG_RCLK 0x8000 #define TLAN_NET_CFG_TCLK 0x4000 #define TLAN_NET_CFG_BIT 0x2000 #define TLAN_NET_CFG_RXCRC 0x1000 @@ -372,7 +376,7 @@ typedef struct tlan_private_tag { /* Generic MII/PHY Registers */ #define MII_GEN_CTL 0x00 -#define MII_GC_RESET 0x8000 +#define MII_GC_RESET 0x8000 #define MII_GC_LOOPBK 0x4000 #define MII_GC_SPEEDSEL 0x2000 #define MII_GC_AUTOENB 0x1000 @@ -397,9 +401,9 @@ typedef struct tlan_private_tag { #define MII_GS_EXTCAP 0x0001 #define MII_GEN_ID_HI 0x02 #define MII_GEN_ID_LO 0x03 -#define MII_GIL_OUI 0xFC00 -#define MII_GIL_MODEL 0x03F0 -#define MII_GIL_REVISION 0x000F +#define MII_GIL_OUI 0xFC00 +#define MII_GIL_MODEL 0x03F0 +#define MII_GIL_REVISION 0x000F #define MII_AN_ADV 0x04 #define MII_AN_LPA 0x05 #define MII_AN_EXP 0x06 @@ -408,7 +412,7 @@ typedef struct tlan_private_tag { #define TLAN_TLPHY_ID 0x10 #define TLAN_TLPHY_CTL 0x11 -#define TLAN_TC_IGLINK 0x8000 +#define TLAN_TC_IGLINK 0x8000 #define TLAN_TC_SWAPOL 0x4000 #define TLAN_TC_AUISEL 0x2000 #define TLAN_TC_SQEEN 0x1000 @@ -435,41 +439,41 @@ typedef struct tlan_private_tag { #define LEVEL1_ID1 0x7810 #define LEVEL1_ID2 0x0000 -#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0 +#define CIRC_INC(a, b) if (++a >= b) a = 0 /* Routines to access internal registers. */ -static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr) +static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr) { outw(internal_addr, base_addr + TLAN_DIO_ADR); return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)); -} /* TLan_DioRead8 */ +} -static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr) +static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr) { outw(internal_addr, base_addr + TLAN_DIO_ADR); return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)); -} /* TLan_DioRead16 */ +} -static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr) +static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr) { outw(internal_addr, base_addr + TLAN_DIO_ADR); return inl(base_addr + TLAN_DIO_DATA); -} /* TLan_DioRead32 */ +} -static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data) +static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data) { outw(internal_addr, base_addr + TLAN_DIO_ADR); outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3)); @@ -479,7 +483,7 @@ static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data) -static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data) +static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data) { outw(internal_addr, base_addr + TLAN_DIO_ADR); outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); @@ -489,16 +493,16 @@ static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data) -static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data) +static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data) { outw(internal_addr, base_addr + TLAN_DIO_ADR); outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); } -#define TLan_ClearBit( bit, port ) outb_p(inb_p(port) & ~bit, port) -#define TLan_GetBit( bit, port ) ((int) (inb_p(port) & bit)) -#define TLan_SetBit( bit, port ) outb_p(inb_p(port) | bit, port) +#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, 
port) +#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit)) +#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port) /* * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those @@ -506,37 +510,37 @@ static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data) * * The original code was: * - * u32 xor( u32 a, u32 b ) { return ( ( a && ! b ) || ( ! a && b ) ); } + * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); } * - * #define XOR8( a, b, c, d, e, f, g, h ) \ - * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) ) - * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) ) + * #define XOR8(a, b, c, d, e, f, g, h) \ + * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) ) + * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) ) * - * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), - * DA(a,30), DA(a,36), DA(a,42) ); - * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), - * DA(a,31), DA(a,37), DA(a,43) ) << 1; - * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), - * DA(a,32), DA(a,38), DA(a,44) ) << 2; - * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), - * DA(a,33), DA(a,39), DA(a,45) ) << 3; - * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), - * DA(a,34), DA(a,40), DA(a,46) ) << 4; - * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), - * DA(a,35), DA(a,41), DA(a,47) ) << 5; + * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), + * DA(a,30), DA(a,36), DA(a,42)); + * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), + * DA(a,31), DA(a,37), DA(a,43)) << 1; + * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), + * DA(a,32), DA(a,38), DA(a,44)) << 2; + * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), + * DA(a,33), DA(a,39), DA(a,45)) << 3; + * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), + * DA(a,34), DA(a,40), DA(a,46)) << 4; + * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), + * DA(a,35), DA(a,41), DA(a,47)) << 5; * */ -static inline u32 TLan_HashFunc( const u8 *a ) +static inline u32 tlan_hash_func(const u8 *a) { - u8 hash; + u8 hash; - hash = (a[0]^a[3]); /* & 077 */ - hash ^= ((a[0]^a[3])>>6); /* & 003 */ - hash ^= ((a[1]^a[4])<<2); /* & 074 */ - hash ^= ((a[1]^a[4])>>4); /* & 017 */ - hash ^= ((a[2]^a[5])<<4); /* & 060 */ - hash ^= ((a[2]^a[5])>>2); /* & 077 */ + hash = (a[0]^a[3]); /* & 077 */ + hash ^= ((a[0]^a[3])>>6); /* & 003 */ + hash ^= ((a[1]^a[4])<<2); /* & 074 */ + hash ^= ((a[1]^a[4])>>4); /* & 017 */ + hash ^= ((a[2]^a[5])<<4); /* & 060 */ + hash ^= ((a[2]^a[5])>>2); /* & 077 */ - return hash & 077; + return hash & 077; } #endif diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b100bd50a0d7..f5e9ac00a07b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -34,6 +34,8 @@ * Modifications for 2.3.99-pre5 kernel. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #define DRV_NAME "tun" #define DRV_VERSION "1.6" #define DRV_DESCRIPTION "Universal TUN/TAP device driver" @@ -76,11 +78,27 @@ #ifdef TUN_DEBUG static int debug; -#define DBG if(tun->debug)printk -#define DBG1 if(debug==2)printk +#define tun_debug(level, tun, fmt, args...) \ +do { \ + if (tun->debug) \ + netdev_printk(level, tun->dev, fmt, ##args); \ +} while (0) +#define DBG1(level, fmt, args...) \ +do { \ + if (debug == 2) \ + printk(level fmt, ##args); \ +} while (0) #else -#define DBG( a... ) -#define DBG1( a... 
) +#define tun_debug(level, tun, fmt, args...) \ +do { \ + if (0) \ + netdev_printk(level, tun->dev, fmt, ##args); \ +} while (0) +#define DBG1(level, fmt, args...) \ +do { \ + if (0) \ + printk(level fmt, ##args); \ +} while (0) #endif #define FLT_EXACT_COUNT 8 @@ -205,7 +223,7 @@ static void tun_put(struct tun_struct *tun) tun_detach(tfile->tun); } -/* TAP filterting */ +/* TAP filtering */ static void addr_hash_set(u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; @@ -360,7 +378,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); - DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); + tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len); /* Drop packet if interface is not attached */ if (!tun->tfile) @@ -499,7 +517,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) sk = tun->socket.sk; - DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); + tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); poll_wait(file, &tun->wq.wait, wait); @@ -690,7 +708,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, if (!tun) return -EBADFD; - DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count); + tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count); result = tun_get_user(tun, iv, iov_length(iv, count), file->f_flags & O_NONBLOCK); @@ -739,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun, else if (sinfo->gso_type & SKB_GSO_UDP) gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; else { - printk(KERN_ERR "tun: unexpected GSO type: " + pr_err("unexpected GSO type: " "0x%x, gso_size %d, hdr_len %d\n", sinfo->gso_type, gso.gso_size, gso.hdr_len); @@ -786,7 +804,7 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct sk_buff *skb; ssize_t ret = 0; - DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); + tun_debug(KERN_INFO, tun, "tun_chr_read\n"); add_wait_queue(&tun->wq.wait, &wait); while (len) { @@ -1083,7 +1101,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || device_create_file(&tun->dev->dev, &dev_attr_owner) || device_create_file(&tun->dev->dev, &dev_attr_group)) - printk(KERN_ERR "Failed to create tun sysfs files\n"); + pr_err("Failed to create tun sysfs files\n"); sk->sk_destruct = tun_sock_destruct; @@ -1092,7 +1110,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) goto failed; } - DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); + tun_debug(KERN_INFO, tun, "tun_set_iff\n"); if (ifr->ifr_flags & IFF_NO_PI) tun->flags |= TUN_NO_PI; @@ -1129,7 +1147,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) static int tun_get_iff(struct net *net, struct tun_struct *tun, struct ifreq *ifr) { - DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); + tun_debug(KERN_INFO, tun, "tun_get_iff\n"); strcpy(ifr->ifr_name, tun->dev->name); @@ -1142,7 +1160,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun, * privs required. */ static int set_offload(struct net_device *dev, unsigned long arg) { - unsigned int old_features, features; + u32 old_features, features; old_features = dev->features; /* Unset features, set them as we chew on the arg. 
*/ @@ -1229,7 +1247,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, if (!tun) goto unlock; - DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); + tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd); ret = 0; switch (cmd) { @@ -1249,8 +1267,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, else tun->flags &= ~TUN_NOCHECKSUM; - DBG(KERN_INFO "%s: checksum %s\n", - tun->dev->name, arg ? "disabled" : "enabled"); + tun_debug(KERN_INFO, tun, "checksum %s\n", + arg ? "disabled" : "enabled"); break; case TUNSETPERSIST: @@ -1260,33 +1278,34 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, else tun->flags &= ~TUN_PERSIST; - DBG(KERN_INFO "%s: persist %s\n", - tun->dev->name, arg ? "enabled" : "disabled"); + tun_debug(KERN_INFO, tun, "persist %s\n", + arg ? "enabled" : "disabled"); break; case TUNSETOWNER: /* Set owner of the device */ tun->owner = (uid_t) arg; - DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); + tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner); break; case TUNSETGROUP: /* Set group of the device */ tun->group= (gid_t) arg; - DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group); + tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group); break; case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { - DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", - tun->dev->name); + tun_debug(KERN_INFO, tun, + "Linktype set failed because interface is up\n"); ret = -EBUSY; } else { tun->dev->type = (int) arg; - DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); + tun_debug(KERN_INFO, tun, "linktype set to %d\n", + tun->dev->type); ret = 0; } break; @@ -1318,8 +1337,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, case SIOCSIFHWADDR: /* Set hw address */ - DBG(KERN_DEBUG "%s: set hw address: %pM\n", - tun->dev->name, ifr.ifr_hwaddr.sa_data); + tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", + ifr.ifr_hwaddr.sa_data); ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); break; @@ -1433,7 +1452,7 @@ static int tun_chr_fasync(int fd, struct file *file, int on) if (!tun) return -EBADFD; - DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); + tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on); if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) goto out; @@ -1455,7 +1474,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) { struct tun_file *tfile; - DBG1(KERN_INFO "tunX: tun_chr_open\n"); + DBG1(KERN_INFO, "tunX: tun_chr_open\n"); tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); if (!tfile) @@ -1476,7 +1495,7 @@ static int tun_chr_close(struct inode *inode, struct file *file) if (tun) { struct net_device *dev = tun->dev; - DBG(KERN_INFO "%s: tun_chr_close\n", dev->name); + tun_debug(KERN_INFO, tun, "tun_chr_close\n"); __tun_detach(tun); @@ -1607,18 +1626,18 @@ static int __init tun_init(void) { int ret = 0; - printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); - printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); + pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); + pr_info("%s\n", DRV_COPYRIGHT); ret = rtnl_link_register(&tun_link_ops); if (ret) { - printk(KERN_ERR "tun: Can't register link_ops\n"); + pr_err("Can't register link_ops\n"); goto err_linkops; } ret = misc_register(&tun_miscdev); if (ret) { - printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); + pr_err("Can't register misc device %d\n", 
TUN_MINOR); goto err_misc; } return 0; diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index a3c46f6a15e7..7fa5ec2de942 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -123,12 +123,11 @@ static const int multicast_filter_limit = 32; #include <linux/in6.h> #include <linux/dma-mapping.h> #include <linux/firmware.h> -#include <generated/utsrelease.h> #include "typhoon.h" MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); -MODULE_VERSION(UTS_RELEASE); +MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE_NAME); MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 109751bad3bb..f967913e11bc 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -328,13 +328,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) { static const char ifname[] = "usbpn%d"; const struct usb_cdc_union_desc *union_header = NULL; - const struct usb_cdc_header_desc *phonet_header = NULL; const struct usb_host_interface *data_desc; struct usb_interface *data_intf; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *dev; struct usbpn_dev *pnd; u8 *data; + int phonet = 0; int len, err; data = intf->altsetting->extra; @@ -355,10 +355,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) (struct usb_cdc_union_desc *)data; break; case 0xAB: - if (phonet_header || dlen < 5) - break; - phonet_header = - (struct usb_cdc_header_desc *)data; + phonet = 1; break; } } @@ -366,7 +363,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) len -= dlen; } - if (!union_header || !phonet_header) + if (!union_header || !phonet) return -EINVAL; data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0); @@ -392,7 +389,6 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) pnd = netdev_priv(dev); SET_NETDEV_DEV(dev, &intf->dev); - netif_stop_queue(dev); pnd->dev = dev; pnd->usb = usb_get_dev(usbdev); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index cc83fa71c3ff..105d7f0630cc 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -403,17 +403,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (tb[IFLA_ADDRESS] == NULL) random_ether_addr(dev->dev_addr); - if (tb[IFLA_IFNAME]) - nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); - else - snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); - - if (strchr(dev->name, '%')) { - err = dev_alloc_name(dev, dev->name); - if (err < 0) - goto err_alloc_name; - } - err = register_netdevice(dev); if (err < 0) goto err_register_dev; @@ -433,7 +422,6 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, err_register_dev: /* nothing to do */ -err_alloc_name: err_configure_peer: unregister_netdevice(peer); return err; diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 09cac704fdd7..0d6fec6b7d93 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -2923,6 +2923,7 @@ static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern) static int velocity_set_wol(struct velocity_info *vptr) { struct mac_regs __iomem *regs = vptr->mac_regs; + enum speed_opt spd_dpx = vptr->options.spd_dpx; static u8 buf[256]; int i; @@ -2968,6 +2969,12 @@ static int velocity_set_wol(struct velocity_info *vptr) writew(0x0FFF, ®s->WOLSRClr); + if (spd_dpx == SPD_DPX_1000_FULL) + goto mac_done; + + if (spd_dpx != SPD_DPX_AUTO) + goto 
advertise_done; + if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs); @@ -2978,6 +2985,7 @@ static int velocity_set_wol(struct velocity_info *vptr) if (vptr->mii_status & VELOCITY_SPEED_1000) MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs); +advertise_done: BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); { @@ -2987,6 +2995,7 @@ static int velocity_set_wol(struct velocity_info *vptr) writeb(GCR, ®s->CHIPGCR); } +mac_done: BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR); /* Turn on SWPTAG just before entering power mode */ BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW); diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index aa2e69b9ff61..d7227539484e 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -361,7 +361,7 @@ enum velocity_owner { #define MAC_REG_CHIPGSR 0x9C #define MAC_REG_TESTCFG 0x9D #define MAC_REG_DEBUG 0x9E -#define MAC_REG_CHIPGCR 0x9F +#define MAC_REG_CHIPGCR 0x9F /* Chip Operation and Diagnostic Control */ #define MAC_REG_WOLCR0_SET 0xA0 #define MAC_REG_WOLCR1_SET 0xA1 #define MAC_REG_PWCFG_SET 0xA2 @@ -848,10 +848,10 @@ enum velocity_owner { * Bits in CHIPGCR register */ -#define CHIPGCR_FCGMII 0x80 /* enable GMII mode */ -#define CHIPGCR_FCFDX 0x40 +#define CHIPGCR_FCGMII 0x80 /* force GMII (else MII only) */ +#define CHIPGCR_FCFDX 0x40 /* force full duplex */ #define CHIPGCR_FCRESV 0x20 -#define CHIPGCR_FCMODE 0x10 +#define CHIPGCR_FCMODE 0x10 /* enable MAC forced mode */ #define CHIPGCR_LPSOPT 0x08 #define CHIPGCR_TM1US 0x04 #define CHIPGCR_TM0US 0x02 diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c index 228d4f7a58af..e74e4b42592d 100644 --- a/drivers/net/vxge/vxge-config.c +++ b/drivers/net/vxge/vxge-config.c @@ -387,8 +387,8 @@ vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev, data1 = steer_ctrl = 0; status = vxge_hw_vpath_fw_api(vpath, - VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, VXGE_HW_FW_API_GET_EPROM_REV, + VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO, 0, &data0, &data1, &steer_ctrl); if (status != VXGE_HW_OK) break; @@ -2868,6 +2868,8 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, ring->rxd_init = attr->rxd_init; ring->rxd_term = attr->rxd_term; ring->buffer_mode = config->buffer_mode; + ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved; + ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved; ring->rxds_limit = config->rxds_limit; ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); @@ -3511,6 +3513,8 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, /* apply "interrupts per txdl" attribute */ fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ; + fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved; + fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved; if (fifo->config->intr) fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST; @@ -4377,6 +4381,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) } writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + vpath->tim_tti_cfg1_saved = val64; + val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]); if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { @@ -4433,6 +4439,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) } writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); + vpath->tim_tti_cfg3_saved = val64; } if (config->ring.enable == VXGE_HW_RING_ENABLE) { 
@@ -4481,6 +4488,8 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) } writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); + vpath->tim_rti_cfg1_saved = val64; + val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]); if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) { @@ -4537,6 +4546,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) } writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); + vpath->tim_rti_cfg3_saved = val64; } val64 = 0; @@ -4555,26 +4565,6 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id) return status; } -void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id) -{ - struct __vxge_hw_virtualpath *vpath; - struct vxge_hw_vpath_reg __iomem *vp_reg; - struct vxge_hw_vp_config *config; - u64 val64; - - vpath = &hldev->virtual_paths[vp_id]; - vp_reg = vpath->vp_reg; - config = vpath->vp_config; - - if (config->fifo.enable == VXGE_HW_FIFO_ENABLE && - config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { - config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; - val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); - val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; - writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); - } -} - /* * __vxge_hw_vpath_initialize * This routine is the final phase of init which initializes the diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h index e249e288d160..3c53aa732c9d 100644 --- a/drivers/net/vxge/vxge-config.h +++ b/drivers/net/vxge/vxge-config.h @@ -682,6 +682,10 @@ struct __vxge_hw_virtualpath { u32 vsport_number; u32 max_kdfc_db; u32 max_nofl_db; + u64 tim_tti_cfg1_saved; + u64 tim_tti_cfg3_saved; + u64 tim_rti_cfg1_saved; + u64 tim_rti_cfg3_saved; struct __vxge_hw_ring *____cacheline_aligned ringh; struct __vxge_hw_fifo *____cacheline_aligned fifoh; @@ -921,6 +925,9 @@ struct __vxge_hw_ring { u32 doorbell_cnt; u32 total_db_cnt; u64 rxds_limit; + u32 rtimer; + u64 tim_rti_cfg1_saved; + u64 tim_rti_cfg3_saved; enum vxge_hw_status (*callback)( struct __vxge_hw_ring *ringh, @@ -1000,6 +1007,9 @@ struct __vxge_hw_fifo { u32 per_txdl_space; u32 vp_id; u32 tx_intr_num; + u32 rtimer; + u64 tim_tti_cfg1_saved; + u64 tim_tti_cfg3_saved; enum vxge_hw_status (*callback)( struct __vxge_hw_fifo *fifo_handle, diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index c81a6512c683..395423aeec00 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -371,9 +371,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, struct vxge_hw_ring_rxd_info ext_info; vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", ring->ndev->name, __func__, __LINE__); - ring->pkts_processed = 0; - - vxge_hw_ring_replenish(ringh); do { prefetch((char *)dtr + L1_CACHE_BYTES); @@ -1588,6 +1585,36 @@ static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id) return ret; } +/* Configure CI */ +static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev) +{ + int i = 0; + + /* Enable CI for RTI */ + if (vdev->config.intr_type == MSI_X) { + for (i = 0; i < vdev->no_of_vpath; i++) { + struct __vxge_hw_ring *hw_ring; + + hw_ring = vdev->vpaths[i].ring.handle; + vxge_hw_vpath_dynamic_rti_ci_set(hw_ring); + } + } + + /* Enable CI for TTI */ + for (i = 0; i < vdev->no_of_vpath; i++) { + struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle; + vxge_hw_vpath_tti_ci_set(hw_fifo); + /* + * For Inta (with or without napi), Set CI ON for only one + * vpath. 
(Have only one free running timer). + */ + if ((vdev->config.intr_type == INTA) && (i == 0)) + break; + } + + return; +} + static int do_vxge_reset(struct vxgedev *vdev, int event) { enum vxge_hw_status status; @@ -1753,6 +1780,9 @@ static int do_vxge_reset(struct vxgedev *vdev, int event) netif_tx_wake_all_queues(vdev->ndev); } + /* configure CI */ + vxge_config_ci_for_tti_rti(vdev); + out: vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__); @@ -1793,22 +1823,29 @@ static void vxge_reset(struct work_struct *work) */ static int vxge_poll_msix(struct napi_struct *napi, int budget) { - struct vxge_ring *ring = - container_of(napi, struct vxge_ring, napi); + struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi); + int pkts_processed; int budget_org = budget; - ring->budget = budget; + ring->budget = budget; + ring->pkts_processed = 0; vxge_hw_vpath_poll_rx(ring->handle); + pkts_processed = ring->pkts_processed; if (ring->pkts_processed < budget_org) { napi_complete(napi); + /* Re enable the Rx interrupts for the vpath */ vxge_hw_channel_msix_unmask( (struct __vxge_hw_channel *)ring->handle, ring->rx_vector_no); + mmiowb(); } - return ring->pkts_processed; + /* We are copying and returning the local variable, in case if after + * clearing the msix interrupt above, if the interrupt fires right + * away which can preempt this NAPI thread */ + return pkts_processed; } static int vxge_poll_inta(struct napi_struct *napi, int budget) @@ -1824,6 +1861,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget) for (i = 0; i < vdev->no_of_vpath; i++) { ring = &vdev->vpaths[i].ring; ring->budget = budget; + ring->pkts_processed = 0; vxge_hw_vpath_poll_rx(ring->handle); pkts_processed += ring->pkts_processed; budget -= ring->pkts_processed; @@ -2054,6 +2092,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev) netdev_get_tx_queue(vdev->ndev, 0); vpath->fifo.indicate_max_pkts = vdev->config.fifo_indicate_max_pkts; + vpath->fifo.tx_vector_no = 0; vpath->ring.rx_vector_no = 0; vpath->ring.rx_csum = vdev->rx_csum; vpath->ring.rx_hwts = vdev->rx_hwts; @@ -2079,6 +2118,61 @@ static int vxge_open_vpaths(struct vxgedev *vdev) return VXGE_HW_OK; } +/** + * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing + * if the interrupts are not within a range + * @fifo: pointer to transmit fifo structure + * Description: The function changes boundary timer and restriction timer + * value depends on the traffic + * Return Value: None + */ +static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo) +{ + fifo->interrupt_count++; + if (jiffies > fifo->jiffies + HZ / 100) { + struct __vxge_hw_fifo *hw_fifo = fifo->handle; + + fifo->jiffies = jiffies; + if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT && + hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) { + hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL; + vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); + } else if (hw_fifo->rtimer != 0) { + hw_fifo->rtimer = 0; + vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo); + } + fifo->interrupt_count = 0; + } +} + +/** + * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing + * if the interrupts are not within a range + * @ring: pointer to receive ring structure + * Description: The function increases of decreases the packet counts within + * the ranges of traffic utilization, if the interrupts due to this ring are + * not within a fixed range. 
+ * Return Value: Nothing + */ +static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) +{ + ring->interrupt_count++; + if (jiffies > ring->jiffies + HZ / 100) { + struct __vxge_hw_ring *hw_ring = ring->handle; + + ring->jiffies = jiffies; + if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT && + hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) { + hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL; + vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); + } else if (hw_ring->rtimer != 0) { + hw_ring->rtimer = 0; + vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring); + } + ring->interrupt_count = 0; + } +} + /* * vxge_isr_napi * @irq: the irq of the device. @@ -2139,24 +2233,39 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) #ifdef CONFIG_PCI_MSI -static irqreturn_t -vxge_tx_msix_handle(int irq, void *dev_id) +static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) { struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; + adaptive_coalesce_tx_interrupts(fifo); + + vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle, + fifo->tx_vector_no); + + vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle, + fifo->tx_vector_no); + VXGE_COMPLETE_VPATH_TX(fifo); + vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle, + fifo->tx_vector_no); + + mmiowb(); + return IRQ_HANDLED; } -static irqreturn_t -vxge_rx_msix_napi_handle(int irq, void *dev_id) +static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id) { struct vxge_ring *ring = (struct vxge_ring *)dev_id; - /* MSIX_IDX for Rx is 1 */ + adaptive_coalesce_rx_interrupts(ring); + vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle, - ring->rx_vector_no); + ring->rx_vector_no); + + vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle, + ring->rx_vector_no); napi_schedule(&ring->napi); return IRQ_HANDLED; @@ -2173,14 +2282,20 @@ vxge_alarm_msix_handle(int irq, void *dev_id) VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID; for (i = 0; i < vdev->no_of_vpath; i++) { + /* Reduce the chance of loosing alarm interrupts by masking + * the vector. A pending bit will be set if an alarm is + * generated and on unmask the interrupt will be fired. 
+ */ vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id); + vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id); + mmiowb(); status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, vdev->exec_mode); if (status == VXGE_HW_OK) { - vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, - msix_id); + msix_id); + mmiowb(); continue; } vxge_debug_intr(VXGE_ERR, @@ -2299,6 +2414,9 @@ static int vxge_enable_msix(struct vxgedev *vdev) vpath->ring.rx_vector_no = (vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE) + 1; + vpath->fifo.tx_vector_no = (vpath->device_id * + VXGE_HW_VPATH_MSIX_ACTIVE); + vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, VXGE_ALARM_MSIX_ID); } @@ -2474,8 +2592,9 @@ INTA_MODE: "%s:vxge:INTA", vdev->ndev->name); vxge_hw_device_set_intr_type(vdev->devh, VXGE_HW_INTR_MODE_IRQLINE); - vxge_hw_vpath_tti_ci_set(vdev->devh, - vdev->vpaths[0].device_id); + + vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle); + ret = request_irq((int) vdev->pdev->irq, vxge_isr_napi, IRQF_SHARED, vdev->desc[0], vdev); @@ -2745,6 +2864,10 @@ static int vxge_open(struct net_device *dev) } netif_tx_start_all_queues(vdev->ndev); + + /* configure CI */ + vxge_config_ci_for_tti_rti(vdev); + goto out0; out2: @@ -3264,19 +3387,6 @@ static const struct net_device_ops vxge_netdev_ops = { #endif }; -static int __devinit vxge_device_revision(struct vxgedev *vdev) -{ - int ret; - u8 revision; - - ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision); - if (ret) - return -EIO; - - vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION); - return 0; -} - static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, struct vxge_config *config, int high_dma, int no_of_vpath, @@ -3316,10 +3426,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, memcpy(&vdev->config, config, sizeof(struct vxge_config)); vdev->rx_csum = 1; /* Enable Rx CSUM by default. 
*/ vdev->rx_hwts = 0; - - ret = vxge_device_revision(vdev); - if (ret < 0) - goto _out1; + vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION); SET_NETDEV_DEV(ndev, &vdev->pdev->dev); @@ -3348,7 +3455,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, vxge_debug_init(VXGE_ERR, "%s: vpath memory allocation failed", vdev->ndev->name); - ret = -ENODEV; + ret = -ENOMEM; goto _out1; } @@ -3369,11 +3476,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, if (vdev->config.gro_enable) ndev->features |= NETIF_F_GRO; - if (register_netdev(ndev)) { + ret = register_netdev(ndev); + if (ret) { vxge_debug_init(vxge_hw_device_trace_level_get(hldev), "%s: %s : device registration failed!", ndev->name, __func__); - ret = -ENODEV; goto _out2; } @@ -3444,6 +3551,11 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev) /* in 2.6 will call stop() if device is up */ unregister_netdev(dev); + kfree(vdev->vpaths); + + /* we are safe to free it now */ + free_netdev(dev); + vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered", buf); vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf, @@ -3799,7 +3911,7 @@ static void __devinit vxge_device_config_init( break; case MSI_X: - device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; + device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT; break; } @@ -4335,10 +4447,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit1; } - if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) { + ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME); + if (ret) { vxge_debug_init(VXGE_ERR, "%s : request regions failed", __func__); - ret = -ENODEV; goto _exit1; } @@ -4446,7 +4558,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) if (!img[i].is_valid) break; vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version " - "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i, + "%d.%d.%d.%d", VXGE_DRIVER_NAME, i, VXGE_EPROM_IMG_MAJOR(img[i].version), VXGE_EPROM_IMG_MINOR(img[i].version), VXGE_EPROM_IMG_FIX(img[i].version), @@ -4643,8 +4755,9 @@ _exit6: _exit5: vxge_device_unregister(hldev); _exit4: - pci_disable_sriov(pdev); + pci_set_drvdata(pdev, NULL); vxge_hw_device_terminate(hldev); + pci_disable_sriov(pdev); _exit3: iounmap(attr.bar0); _exit2: @@ -4655,7 +4768,7 @@ _exit0: kfree(ll_config); kfree(device_config); driver_config->config_dev_cnt--; - pci_set_drvdata(pdev, NULL); + driver_config->total_dev_cnt--; return ret; } @@ -4668,45 +4781,34 @@ _exit0: static void __devexit vxge_remove(struct pci_dev *pdev) { struct __vxge_hw_device *hldev; - struct vxgedev *vdev = NULL; - struct net_device *dev; - int i = 0; + struct vxgedev *vdev; + int i; hldev = pci_get_drvdata(pdev); - if (hldev == NULL) return; - dev = hldev->ndev; - vdev = netdev_priv(dev); + vdev = netdev_priv(hldev->ndev); vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__); - vxge_debug_init(vdev->level_trace, "%s : removing PCI device...", __func__); - vxge_device_unregister(hldev); - for (i = 0; i < vdev->no_of_vpath; i++) { + for (i = 0; i < vdev->no_of_vpath; i++) vxge_free_mac_add_list(&vdev->vpaths[i]); - vdev->vpaths[i].mcast_addr_cnt = 0; - vdev->vpaths[i].mac_addr_cnt = 0; - } - - kfree(vdev->vpaths); + vxge_device_unregister(hldev); + pci_set_drvdata(pdev, NULL); + /* Do not call pci_disable_sriov here, as it will break child devices */ + vxge_hw_device_terminate(hldev); iounmap(vdev->bar0); - - /* we are safe to free it now */ - free_netdev(dev); + 
pci_release_region(pdev, 0); + pci_disable_device(pdev); + driver_config->config_dev_cnt--; + driver_config->total_dev_cnt--; vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered", __func__, __LINE__); - - vxge_hw_device_terminate(hldev); - - pci_disable_device(pdev); - pci_release_region(pdev, 0); - pci_set_drvdata(pdev, NULL); vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__, __LINE__); } diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h index 5746fedc356f..40474f0da576 100644 --- a/drivers/net/vxge/vxge-main.h +++ b/drivers/net/vxge/vxge-main.h @@ -59,11 +59,13 @@ #define VXGE_TTI_LTIMER_VAL 1000 #define VXGE_T1A_TTI_LTIMER_VAL 80 #define VXGE_TTI_RTIMER_VAL 0 +#define VXGE_TTI_RTIMER_ADAPT_VAL 10 #define VXGE_T1A_TTI_RTIMER_VAL 400 #define VXGE_RTI_BTIMER_VAL 250 #define VXGE_RTI_LTIMER_VAL 100 #define VXGE_RTI_RTIMER_VAL 0 -#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH +#define VXGE_RTI_RTIMER_ADAPT_VAL 15 +#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH #define VXGE_ISR_POLLING_CNT 8 #define VXGE_MAX_CONFIG_DEV 0xFF #define VXGE_EXEC_MODE_DISABLE 0 @@ -107,6 +109,14 @@ #define RTI_T1A_RX_UFC_C 50 #define RTI_T1A_RX_UFC_D 60 +/* + * The interrupt rate is maintained at 3k per second with the moderation + * parameters for most traffic but not all. This is the maximum interrupt + * count allowed per function with INTA or per vector in the case of + * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A. + */ +#define VXGE_T1A_MAX_INTERRUPT_COUNT 100 +#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT 200 /* Milli secs timer period */ #define VXGE_TIMER_DELAY 10000 @@ -247,6 +257,11 @@ struct vxge_fifo { int tx_steering_type; int indicate_max_pkts; + /* Adaptive interrupt moderation parameters used in T1A */ + unsigned long interrupt_count; + unsigned long jiffies; + + u32 tx_vector_no; /* Tx stats */ struct vxge_fifo_stats stats; } ____cacheline_aligned; @@ -271,6 +286,10 @@ struct vxge_ring { */ int driver_id; + /* Adaptive interrupt moderation parameters used in T1A */ + unsigned long interrupt_count; + unsigned long jiffies; + /* copy of the flag indicating whether rx_csum is to be used */ u32 rx_csum:1, rx_hwts:1; @@ -286,7 +305,7 @@ struct vxge_ring { int vlan_tag_strip; struct vlan_group *vlgrp; - int rx_vector_no; + u32 rx_vector_no; enum vxge_hw_status last_status; /* Rx stats */ diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c index 4c10d6c4075f..8674f331311c 100644 --- a/drivers/net/vxge/vxge-traffic.c +++ b/drivers/net/vxge/vxge-traffic.c @@ -218,6 +218,68 @@ exit: return status; } +void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo) +{ + struct vxge_hw_vpath_reg __iomem *vp_reg; + struct vxge_hw_vp_config *config; + u64 val64; + + if (fifo->config->enable != VXGE_HW_FIFO_ENABLE) + return; + + vp_reg = fifo->vp_reg; + config = container_of(fifo->config, struct vxge_hw_vp_config, fifo); + + if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { + config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE; + val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + fifo->tim_tti_cfg1_saved = val64; + writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); + } +} + +void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring) +{ + u64 val64 = ring->tim_rti_cfg1_saved; + + val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI; + ring->tim_rti_cfg1_saved = val64; + writeq(val64, 
&ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]); +} + +void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo) +{ + u64 val64 = fifo->tim_tti_cfg3_saved; + u64 timer = (fifo->rtimer * 1000) / 272; + + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); + if (timer) + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | + VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5); + + writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]); + /* tti_cfg3_saved is not updated again because it is + * initialized at one place only - init time. + */ +} + +void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring) +{ + u64 val64 = ring->tim_rti_cfg3_saved; + u64 timer = (ring->rtimer * 1000) / 272; + + val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff); + if (timer) + val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) | + VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4); + + writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]); + /* rti_cfg3_saved is not updated again because it is + * initialized at one place only - init time. + */ +} + /** * vxge_hw_channel_msix_mask - Mask MSIX Vector. * @channeh: Channel for rx or tx handle @@ -254,6 +316,23 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id) } /** + * vxge_hw_channel_msix_clear - Unmask the MSIX Vector. + * @channel: Channel for rx or tx handle + * @msix_id: MSI ID + * + * The function unmasks the msix interrupt for the given msix_id + * if configured in MSIX oneshot mode + * + * Returns: 0 + */ +void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id) +{ + __vxge_hw_pio_mem_write32_upper( + (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32), + &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]); +} + +/** * vxge_hw_device_set_intr_type - Updates the configuration * with new interrupt type. * @hldev: HW device handle. @@ -2191,19 +2270,14 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, if (vpath->hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( + VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN, + 0, 32), &vp_reg->one_shot_vect0_en); + __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN, 0, 32), &vp_reg->one_shot_vect1_en); - } - - if (vpath->hldev->config.intr_mode == - VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN, 0, 32), &vp_reg->one_shot_vect2_en); - - __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn( - VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN, - 0, 32), &vp_reg->one_shot_vect3_en); } } @@ -2229,6 +2303,32 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id) } /** + * vxge_hw_vpath_msix_clear - Clear MSIX Vector. + * @vp: Virtual Path handle. + * @msix_id: MSI ID + * + * The function clears the msix interrupt for the given msix_id + * + * Returns: 0, + * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range + * status. 
+ * See also: + */ +void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id) +{ + struct __vxge_hw_device *hldev = vp->vpath->hldev; + + if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)) + __vxge_hw_pio_mem_write32_upper( + (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), + &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]); + else + __vxge_hw_pio_mem_write32_upper( + (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32), + &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]); +} + +/** * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector. * @vp: Virtual Path handle. * @msix_id: MSI ID diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h index d48486d6afa1..9d9dfda4c7ab 100644 --- a/drivers/net/vxge/vxge-traffic.h +++ b/drivers/net/vxge/vxge-traffic.h @@ -2142,6 +2142,10 @@ void vxge_hw_device_clear_tx_rx( * Virtual Paths */ +void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring); + +void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo); + u32 vxge_hw_vpath_id( struct __vxge_hw_vpath_handle *vpath_handle); @@ -2245,6 +2249,8 @@ void vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle, int msix_id); +void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id); + void vxge_hw_device_flush_io(struct __vxge_hw_device *devh); void @@ -2270,6 +2276,9 @@ void vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id); void +vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id); + +void vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh); @@ -2282,7 +2291,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh); int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); -void -vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); +void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo); + +void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring); #endif diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h index ad2f99b9bcf3..581e21525e85 100644 --- a/drivers/net/vxge/vxge-version.h +++ b/drivers/net/vxge/vxge-version.h @@ -16,8 +16,8 @@ #define VXGE_VERSION_MAJOR "2" #define VXGE_VERSION_MINOR "5" -#define VXGE_VERSION_FIX "1" -#define VXGE_VERSION_BUILD "22082" +#define VXGE_VERSION_FIX "2" +#define VXGE_VERSION_BUILD "22259" #define VXGE_VERSION_FOR "k" #define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld)) diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index b4338f389394..7aeb113cbb90 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -274,6 +274,7 @@ source "drivers/net/wireless/b43legacy/Kconfig" source "drivers/net/wireless/hostap/Kconfig" source "drivers/net/wireless/ipw2x00/Kconfig" source "drivers/net/wireless/iwlwifi/Kconfig" +source "drivers/net/wireless/iwlegacy/Kconfig" source "drivers/net/wireless/iwmc3200wifi/Kconfig" source "drivers/net/wireless/libertas/Kconfig" source "drivers/net/wireless/orinoco/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 9760561a27a5..ddd3fb6ba1d3 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -24,7 +24,7 @@ obj-$(CONFIG_B43LEGACY) += b43legacy/ obj-$(CONFIG_ZD1211RW) += zd1211rw/ obj-$(CONFIG_RTL8180) += rtl818x/ obj-$(CONFIG_RTL8187) += rtl818x/ -obj-$(CONFIG_RTL8192CE) += rtlwifi/ +obj-$(CONFIG_RTLWIFI) += rtlwifi/ # 16-bit 
wireless PCMCIA client drivers obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o @@ -41,7 +41,8 @@ obj-$(CONFIG_ADM8211) += adm8211.o obj-$(CONFIG_MWL8K) += mwl8k.o -obj-$(CONFIG_IWLWIFI) += iwlwifi/ +obj-$(CONFIG_IWLAGN) += iwlwifi/ +obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/ obj-$(CONFIG_RT2X00) += rt2x00/ obj-$(CONFIG_P54_COMMON) += p54/ diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index f9aa1bc0a947..afe2cbc6cb24 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -1658,7 +1658,7 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb, } /* Put adm8211_tx_hdr on skb and transmit */ -static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +static void adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct adm8211_tx_hdr *txhdr; size_t payload_len, hdrlen; @@ -1707,8 +1707,6 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb) txhdr->retry_limit = info->control.rates[0].count; adm8211_tx_raw(dev, skb, plcp_signal, hdrlen); - - return NETDEV_TX_OK; } static int adm8211_alloc_rings(struct ieee80211_hw *dev) diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 1476314afa8a..298601436ee2 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c @@ -1728,7 +1728,7 @@ static void at76_mac80211_tx_callback(struct urb *urb) ieee80211_wake_queues(priv->hw); } -static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct at76_priv *priv = hw->priv; struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer; @@ -1741,7 +1741,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (priv->tx_urb->status == -EINPROGRESS) { wiphy_err(priv->hw->wiphy, "%s called while tx urb is pending\n", __func__); - return NETDEV_TX_BUSY; + dev_kfree_skb_any(skb); + return; } /* The following code lines are important when the device is going to @@ -1755,7 +1756,8 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (compare_ether_addr(priv->bssid, mgmt->bssid)) { memcpy(priv->bssid, mgmt->bssid, ETH_ALEN); ieee80211_queue_work(hw, &priv->work_join_bssid); - return NETDEV_TX_BUSY; + dev_kfree_skb_any(skb); + return; } } @@ -1795,8 +1797,6 @@ static int at76_mac80211_tx(struct ieee80211_hw *hw, struct sk_buff *skb) priv->tx_urb, priv->tx_urb->hcpriv, priv->tx_urb->complete); } - - return 0; } static int at76_mac80211_start(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/at76c50x-usb.h b/drivers/net/wireless/at76c50x-usb.h index 4a37447dfc01..f14a65473fe8 100644 --- a/drivers/net/wireless/at76c50x-usb.h +++ b/drivers/net/wireless/at76c50x-usb.h @@ -290,7 +290,7 @@ struct mib_mac_mgmt { u8 res; u8 multi_domain_capability_implemented; u8 multi_domain_capability_enabled; - u8 country_string[3]; + u8 country_string[IEEE80211_COUNTRY_STRING_LEN]; u8 reserved[3]; } __packed; diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig index d7a4799d20fb..7b9672b0d090 100644 --- a/drivers/net/wireless/ath/ar9170/Kconfig +++ b/drivers/net/wireless/ath/ar9170/Kconfig @@ -1,8 +1,10 @@ config AR9170_USB - tristate "Atheros AR9170 802.11n USB support" + tristate "Atheros AR9170 802.11n USB support (OBSOLETE)" depends on USB && MAC80211 select FW_LOADER help + This driver is going to get replaced by carl9170. 
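The adm8211 and at76c50x hunks above are part of a tree-wide mac80211 change carried in this series: the ieee80211_ops.tx callback now returns void, so a driver can no longer bounce a frame back with NETDEV_TX_BUSY and has to consume the skb on every exit path. A minimal sketch of the resulting shape (the foo_* names are hypothetical, not from this patch set):

static void foo_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct foo_priv *priv = hw->priv;

	/* hand the frame to the hardware queue */
	if (foo_queue_frame(priv, skb) < 0) {
		/* no return code to report any more - drop it ourselves */
		dev_kfree_skb_any(skb);
		return;
	}
	/* on success, tx status is reported from the completion path */
}
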
+ This is a driver for the Atheros "otus" 802.11n USB devices. These devices require additional firmware (2 files). diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h index 4f845f80c098..371e4ce49528 100644 --- a/drivers/net/wireless/ath/ar9170/ar9170.h +++ b/drivers/net/wireless/ath/ar9170/ar9170.h @@ -224,7 +224,7 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); int ar9170_nag_limiter(struct ar9170 *ar); /* MAC */ -int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); int ar9170_init_mac(struct ar9170 *ar); int ar9170_set_qos(struct ar9170 *ar); int ar9170_update_multicast(struct ar9170 *ar, const u64 mc_hast); diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index 32bf79e6a320..b761fec0d721 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c @@ -1475,7 +1475,7 @@ static void ar9170_tx(struct ar9170 *ar) msecs_to_jiffies(AR9170_JANITOR_DELAY)); } -int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +void ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ar9170 *ar = hw->priv; struct ieee80211_tx_info *info; @@ -1493,11 +1493,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) skb_queue_tail(&ar->tx_pending[queue], skb); ar9170_tx(ar); - return NETDEV_TX_OK; + return; err_free: dev_kfree_skb_any(skb); - return NETDEV_TX_OK; } static int ar9170_op_add_interface(struct ieee80211_hw *hw, @@ -1945,7 +1944,8 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue, static int ar9170_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) { switch (action) { case IEEE80211_AMPDU_RX_START: diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h index e43210c8585c..a6c6a466000f 100644 --- a/drivers/net/wireless/ath/ath.h +++ b/drivers/net/wireless/ath/ath.h @@ -108,12 +108,14 @@ enum ath_cipher { * struct ath_ops - Register read/write operations * * @read: Register read + * @multi_read: Multiple register read * @write: Register write * @enable_write_buffer: Enable multiple register writes * @write_flush: flush buffered register writes and disable buffering */ struct ath_ops { unsigned int (*read)(void *, u32 reg_offset); + void (*multi_read)(void *, u32 *addr, u32 *val, u16 count); void (*write)(void *, u32 val, u32 reg_offset); void (*enable_write_buffer)(void *); void (*write_flush) (void *); diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig index e0793319389d..e18a9aa7b6ca 100644 --- a/drivers/net/wireless/ath/ath5k/Kconfig +++ b/drivers/net/wireless/ath/ath5k/Kconfig @@ -40,6 +40,17 @@ config ATH5K_DEBUG modprobe ath5k debug=0x00000400 +config ATH5K_TRACER + bool "Atheros 5xxx tracer" + depends on ATH5K + depends on EVENT_TRACING + ---help--- + Say Y here to enable tracepoints for the ath5k driver + using the kernel tracing infrastructure. Select this + option if you are interested in debugging the driver. + + If unsure, say N. 
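Besides the void tx conversion, ar9170_ampdu_action above picks up the new u8 buf_size argument; for the TX side it carries the reorder-buffer size the peer advertised in its block-ack response, and drivers that aggregate in hardware use it to cap A-MPDU length. A rough sketch of a handler with the new prototype (foo_set_ampdu_limit is a hypothetical driver helper):

static int foo_ampdu_action(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    enum ieee80211_ampdu_mlme_action action,
			    struct ieee80211_sta *sta, u16 tid, u16 *ssn,
			    u8 buf_size)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* never build aggregates larger than the peer can reorder */
		foo_set_ampdu_limit(hw->priv, sta, tid, buf_size);
		return 0;
	default:
		/* remaining actions are unchanged by the API update */
		return -EOPNOTSUPP;
	}
}
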
+ config ATH5K_AHB bool "Atheros 5xxx AHB bus support" depends on (ATHEROS_AR231X && !PCI) diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index 707cde149248..82324e98efef 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -31,7 +31,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz) *csz = L1_CACHE_BYTES >> 2; } -bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) +static bool +ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) { struct ath5k_softc *sc = common->priv; struct platform_device *pdev = to_platform_device(sc->dev); @@ -46,10 +47,10 @@ bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) eeprom += off; if (eeprom > eeprom_end) - return -EINVAL; + return false; *data = *eeprom; - return 0; + return true; } int ath5k_hw_read_srev(struct ath5k_hw *ah) @@ -92,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev) goto err_out; } - mem = ioremap_nocache(res->start, res->end - res->start + 1); + mem = ioremap_nocache(res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h index 407e39c2b10b..8a06dbd39629 100644 --- a/drivers/net/wireless/ath/ath5k/ath5k.h +++ b/drivers/net/wireless/ath/ath5k/ath5k.h @@ -210,14 +210,9 @@ /* Initial values */ #define AR5K_INIT_CYCRSSI_THR1 2 -/* Tx retry limits */ -#define AR5K_INIT_SH_RETRY 10 -#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY -/* For station mode */ -#define AR5K_INIT_SSH_RETRY 32 -#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY -#define AR5K_INIT_TX_RETRY 10 - +/* Tx retry limit defaults from standard */ +#define AR5K_INIT_RETRY_SHORT 7 +#define AR5K_INIT_RETRY_LONG 4 /* Slot time */ #define AR5K_INIT_SLOT_TIME_TURBO 6 @@ -518,7 +513,7 @@ enum ath5k_tx_queue_id { AR5K_TX_QUEUE_ID_NOQCU_DATA = 0, AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1, AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/ - AR5K_TX_QUEUE_ID_DATA_MAX = 4, /*IEEE80211_TX_QUEUE_DATA4*/ + AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/ AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/ AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/ AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/ @@ -1057,7 +1052,9 @@ struct ath5k_hw { #define ah_modes ah_capabilities.cap_mode #define ah_ee_version ah_capabilities.cap_eeprom.ee_version - u32 ah_limit_tx_retries; + u8 ah_retry_long; + u8 ah_retry_short; + u8 ah_coverage_class; bool ah_ack_bitrate_high; u8 ah_bwmode; @@ -1067,7 +1064,6 @@ struct ath5k_hw { u8 ah_ant_mode; u8 ah_tx_ant; u8 ah_def_ant; - bool ah_software_retry; struct ath5k_capabilities ah_capabilities; @@ -1162,6 +1158,26 @@ void ath5k_hw_deinit(struct ath5k_hw *ah); int ath5k_sysfs_register(struct ath5k_softc *sc); void ath5k_sysfs_unregister(struct ath5k_softc *sc); +/* base.c */ +struct ath5k_buf; +struct ath5k_txq; + +void set_beacon_filter(struct ieee80211_hw *hw, bool enable); +bool ath_any_vif_assoc(struct ath5k_softc *sc); +void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ath5k_txq *txq); +int ath5k_init_hw(struct ath5k_softc *sc); +int ath5k_stop_hw(struct ath5k_softc *sc); +void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif); +void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc, + struct ieee80211_vif *vif); +int 
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan); +void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); +int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +void ath5k_beacon_config(struct ath5k_softc *sc); +void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf); +void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf); + /*Chip id helper functions */ const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val); int ath5k_hw_read_srev(struct ath5k_hw *ah); @@ -1250,6 +1266,8 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, struct ath5k_txq_info *queue_info); +void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, + unsigned int queue); u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue); void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index cdac5cff0177..bc8240560488 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -118,8 +118,8 @@ int ath5k_hw_init(struct ath5k_softc *sc) ah->ah_bwmode = AR5K_BWMODE_DEFAULT; ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; ah->ah_imr = 0; - ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; - ah->ah_software_retry = false; + ah->ah_retry_short = AR5K_INIT_RETRY_SHORT; + ah->ah_retry_long = AR5K_INIT_RETRY_LONG; ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT; ah->ah_noise_floor = -95; /* until first NF calibration is run */ sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO; @@ -220,7 +220,8 @@ int ath5k_hw_init(struct ath5k_softc *sc) ah->ah_radio = AR5K_RF5112; ah->ah_single_chip = false; ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B; - } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4)) { + } else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) || + ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) { ah->ah_radio = AR5K_RF2316; ah->ah_single_chip = true; ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 09ae4ef0fd51..4d7f21ee111c 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -61,6 +61,9 @@ #include "debug.h" #include "ani.h" +#define CREATE_TRACE_POINTS +#include "trace.h" + int ath5k_modparam_nohwcrypt; module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); @@ -242,73 +245,68 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re \********************/ /* - * Convert IEEE channel number to MHz frequency. - */ -static inline short -ath5k_ieee2mhz(short chan) -{ - if (chan <= 14 || chan >= 27) - return ieee80211chan2mhz(chan); - else - return 2212 + chan * 20; -} - -/* * Returns true for the channel numbers used without all_channels modparam. 
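The base.c changes that follow drop the driver-private ath5k_ieee2mhz() in favour of cfg80211's ieee80211_channel_to_frequency(), which takes the band explicitly and returns 0 for channel numbers it cannot map - that is what the "if (freq == 0) continue;" check in the hunk below relies on. A tiny worked example of the helper (the wrapper function is only illustrative):

/* channel 7 in the 2 GHz band maps to 2407 + 7 * 5 = 2442 MHz */
static int foo_chan_to_freq_example(void)
{
	return ieee80211_channel_to_frequency(7, IEEE80211_BAND_2GHZ);
}
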
*/ -static bool ath5k_is_standard_channel(short chan) +static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band) { - return ((chan <= 14) || - /* UNII 1,2 */ - ((chan & 3) == 0 && chan >= 36 && chan <= 64) || + if (band == IEEE80211_BAND_2GHZ && chan <= 14) + return true; + + return /* UNII 1,2 */ + (((chan & 3) == 0 && chan >= 36 && chan <= 64) || /* midband */ ((chan & 3) == 0 && chan >= 100 && chan <= 140) || /* UNII-3 */ - ((chan & 3) == 1 && chan >= 149 && chan <= 165)); + ((chan & 3) == 1 && chan >= 149 && chan <= 165) || + /* 802.11j 5.030-5.080 GHz (20MHz) */ + (chan == 8 || chan == 12 || chan == 16) || + /* 802.11j 4.9GHz (20MHz) */ + (chan == 184 || chan == 188 || chan == 192 || chan == 196)); } static unsigned int -ath5k_copy_channels(struct ath5k_hw *ah, - struct ieee80211_channel *channels, - unsigned int mode, - unsigned int max) +ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, + unsigned int mode, unsigned int max) { - unsigned int i, count, size, chfreq, freq, ch; - - if (!test_bit(mode, ah->ah_modes)) - return 0; + unsigned int count, size, chfreq, freq, ch; + enum ieee80211_band band; switch (mode) { case AR5K_MODE_11A: /* 1..220, but 2GHz frequencies are filtered by check_channel */ - size = 220 ; + size = 220; chfreq = CHANNEL_5GHZ; + band = IEEE80211_BAND_5GHZ; break; case AR5K_MODE_11B: case AR5K_MODE_11G: size = 26; chfreq = CHANNEL_2GHZ; + band = IEEE80211_BAND_2GHZ; break; default: ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); return 0; } - for (i = 0, count = 0; i < size && max > 0; i++) { - ch = i + 1 ; - freq = ath5k_ieee2mhz(ch); + count = 0; + for (ch = 1; ch <= size && count < max; ch++) { + freq = ieee80211_channel_to_frequency(ch, band); + + if (freq == 0) /* mapping failed - not a standard channel */ + continue; /* Check if channel is supported by the chipset */ if (!ath5k_channel_ok(ah, freq, chfreq)) continue; - if (!modparam_all_channels && !ath5k_is_standard_channel(ch)) + if (!modparam_all_channels && + !ath5k_is_standard_channel(ch, band)) continue; /* Write channel info and increment counter */ channels[count].center_freq = freq; - channels[count].band = (chfreq == CHANNEL_2GHZ) ? 
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + channels[count].band = band; switch (mode) { case AR5K_MODE_11A: case AR5K_MODE_11G: @@ -319,7 +317,6 @@ ath5k_copy_channels(struct ath5k_hw *ah, } count++; - max--; } return count; @@ -364,7 +361,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_bitrates = 12; sband->channels = sc->channels; - sband->n_channels = ath5k_copy_channels(ah, sband->channels, + sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11G, max_c); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; @@ -390,7 +387,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) } sband->channels = sc->channels; - sband->n_channels = ath5k_copy_channels(ah, sband->channels, + sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11B, max_c); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; @@ -410,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw) sband->n_bitrates = 8; sband->channels = &sc->channels[count_c]; - sband->n_channels = ath5k_copy_channels(ah, sband->channels, + sband->n_channels = ath5k_setup_channels(ah, sband->channels, AR5K_MODE_11A, max_c); hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; @@ -445,31 +442,9 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) return ath5k_reset(sc, chan, true); } -static void -ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode) -{ - sc->curmode = mode; - - if (mode == AR5K_MODE_11A) { - sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ]; - } else { - sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ]; - } -} - -struct ath_vif_iter_data { - const u8 *hw_macaddr; - u8 mask[ETH_ALEN]; - u8 active_mac[ETH_ALEN]; /* first active MAC */ - bool need_set_hw_addr; - bool found_active; - bool any_assoc; - enum nl80211_iftype opmode; -}; - -static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct ath_vif_iter_data *iter_data = data; + struct ath5k_vif_iter_data *iter_data = data; int i; struct ath5k_vif *avf = (void *)vif->drv_priv; @@ -499,9 +474,12 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) */ if (avf->opmode == NL80211_IFTYPE_AP) iter_data->opmode = NL80211_IFTYPE_AP; - else + else { + if (avf->opmode == NL80211_IFTYPE_STATION) + iter_data->n_stas++; if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED) iter_data->opmode = avf->opmode; + } } void @@ -509,7 +487,8 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc, struct ieee80211_vif *vif) { struct ath_common *common = ath5k_hw_common(sc->ah); - struct ath_vif_iter_data iter_data; + struct ath5k_vif_iter_data iter_data; + u32 rfilt; /* * Use the hardware MAC address as reference, the hardware uses it @@ -520,12 +499,13 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc, iter_data.found_active = false; iter_data.need_set_hw_addr = true; iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED; + iter_data.n_stas = 0; if (vif) - ath_vif_iter(&iter_data, vif->addr, vif); + ath5k_vif_iter(&iter_data, vif->addr, vif); /* Get list of all active MAC addresses */ - ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter, + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter, &iter_data); memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN); @@ -543,20 +523,19 @@ ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc, if (ath5k_hw_hasbssidmask(sc->ah)) ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); -} -void -ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif) -{ - struct 
ath5k_hw *ah = sc->ah; - u32 rfilt; + /* Set up RX Filter */ + if (iter_data.n_stas > 1) { + /* If you have multiple STA interfaces connected to + * different APs, ARPs are not received (most of the time?) + * Enabling PROMISC appears to fix that probem. + */ + sc->filter_flags |= AR5K_RX_FILTER_PROM; + } - /* configure rx filter */ rfilt = sc->filter_flags; - ath5k_hw_set_rx_filter(ah, rfilt); + ath5k_hw_set_rx_filter(sc->ah, rfilt); ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); - - ath5k_update_bssid_mask_and_opmode(sc, vif); } static inline int @@ -569,7 +548,7 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) "hw_rix out of bounds: %x\n", hw_rix)) return 0; - rix = sc->rate_idx[sc->curband->band][hw_rix]; + rix = sc->rate_idx[sc->curchan->band][hw_rix]; if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) rix = 0; @@ -964,6 +943,7 @@ ath5k_txq_setup(struct ath5k_softc *sc, spin_lock_init(&txq->lock); txq->setup = true; txq->txq_len = 0; + txq->txq_max = ATH5K_TXQ_LEN_MAX; txq->txq_poll_mark = false; txq->txq_stuck = 0; } @@ -1132,7 +1112,7 @@ ath5k_rx_start(struct ath5k_softc *sc) spin_unlock_bh(&sc->rxbuflock); ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */ - ath5k_mode_setup(sc, NULL); /* set filters, etc. */ + ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */ ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ return 0; @@ -1376,10 +1356,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb, * right now, so it's not too bad... */ rxs->mactime = ath5k_extend_tsf(sc->ah, rs->rs_tstamp); - rxs->flag |= RX_FLAG_TSFT; + rxs->flag |= RX_FLAG_MACTIME_MPDU; rxs->freq = sc->curchan->center_freq; - rxs->band = sc->curband->band; + rxs->band = sc->curchan->band; rxs->signal = sc->ah->ah_noise_floor + rs->rs_rssi; @@ -1394,10 +1374,10 @@ ath5k_receive_frame(struct ath5k_softc *sc, struct sk_buff *skb, rxs->flag |= ath5k_rx_decrypted(sc, skb, rs); if (rxs->rate_idx >= 0 && rs->rs_rate == - sc->curband->bitrates[rxs->rate_idx].hw_value_short) + sc->sbands[sc->curchan->band].bitrates[rxs->rate_idx].hw_value_short) rxs->flag |= RX_FLAG_SHORTPRE; - ath5k_debug_dump_skb(sc, skb, "RX ", 0); + trace_ath5k_rx(sc, skb); ath5k_update_beacon_rssi(sc, skb, rs->rs_rssi); @@ -1533,7 +1513,7 @@ unlock: * TX Handling * \*************/ -int +void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath5k_txq *txq) { @@ -1542,7 +1522,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, unsigned long flags; int padsize; - ath5k_debug_dump_skb(sc, skb, "TX ", 1); + trace_ath5k_tx(sc, skb, txq); /* * The hardware expects the header padded to 4 byte boundaries. 
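The new per-queue txq_max field (initialised to ATH5K_TXQ_LEN_MAX above and made tunable through the set_ringparam callback added further down) drives simple software flow control, visible in the hunk just below: the mac80211 queue is stopped once the driver's queue reaches the limit and woken again as completions drain it. A rough sketch of that pattern; FOO_TXQ_LOW_MARK is a hypothetical low-water threshold, not a constant from this patch:

/* enqueue side, called after adding one frame to the software queue */
static void foo_txq_throttle(struct ieee80211_hw *hw, struct ath5k_txq *txq)
{
	if (txq->txq_len >= txq->txq_max)
		ieee80211_stop_queue(hw, txq->qnum);
}

/* completion side, called after reaping finished descriptors */
static void foo_txq_unthrottle(struct ieee80211_hw *hw, struct ath5k_txq *txq)
{
	if (txq->txq_len < FOO_TXQ_LOW_MARK)
		ieee80211_wake_queue(hw, txq->qnum);
}
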
@@ -1555,7 +1535,7 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, goto drop_packet; } - if (txq->txq_len >= ATH5K_TXQ_LEN_MAX) + if (txq->txq_len >= txq->txq_max) ieee80211_stop_queue(hw, txq->qnum); spin_lock_irqsave(&sc->txbuflock, flags); @@ -1582,16 +1562,15 @@ ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, spin_unlock_irqrestore(&sc->txbuflock, flags); goto drop_packet; } - return NETDEV_TX_OK; + return; drop_packet: dev_kfree_skb_any(skb); - return NETDEV_TX_OK; } static void ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb, - struct ath5k_tx_status *ts) + struct ath5k_txq *txq, struct ath5k_tx_status *ts) { struct ieee80211_tx_info *info; int i; @@ -1643,6 +1622,7 @@ ath5k_tx_frame_completed(struct ath5k_softc *sc, struct sk_buff *skb, else sc->stats.antenna_tx[0]++; /* invalid */ + trace_ath5k_tx_complete(sc, skb, txq, ts); ieee80211_tx_status(sc->hw, skb); } @@ -1679,7 +1659,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE); - ath5k_tx_frame_completed(sc, skb, &ts); + ath5k_tx_frame_completed(sc, skb, txq, &ts); } /* @@ -1821,8 +1801,6 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) goto out; } - ath5k_debug_dump_skb(sc, skb, "BC ", 1); - ath5k_txbuf_free_skb(sc, avf->bbuf); avf->bbuf->skb = skb; ret = ath5k_beacon_setup(sc, avf->bbuf); @@ -1917,6 +1895,8 @@ ath5k_beacon_send(struct ath5k_softc *sc) sc->opmode == NL80211_IFTYPE_MESH_POINT) ath5k_beacon_update(sc->hw, vif); + trace_ath5k_tx(sc, bf->skb, &sc->txqs[sc->bhalq]); + ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); ath5k_hw_start_tx_dma(ah, sc->bhalq); ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", @@ -2417,7 +2397,8 @@ ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops) /* set up multi-rate retry capabilities */ if (sc->ah->ah_version == AR5K_AR5212) { hw->max_rates = 4; - hw->max_rate_tries = 11; + hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT, + AR5K_INIT_RETRY_LONG); } hw->vif_data_size = sizeof(struct ath5k_vif); @@ -2554,7 +2535,6 @@ ath5k_init_hw(struct ath5k_softc *sc) * and then setup of the interrupt mask. */ sc->curchan = sc->hw->conf.channel; - sc->curband = &sc->sbands[sc->curchan->band]; sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; @@ -2681,10 +2661,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan, * so we should also free any remaining * tx buffers */ ath5k_drain_tx_buffs(sc); - if (chan) { + if (chan) sc->curchan = chan; - sc->curband = &sc->sbands[chan->band]; - } ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL, skip_pcu); if (ret) { @@ -2782,12 +2760,6 @@ ath5k_init(struct ieee80211_hw *hw) goto err; } - /* NB: setup here so ath5k_rate_update is happy */ - if (test_bit(AR5K_MODE_11A, ah->ah_modes)) - ath5k_setcurmode(sc, AR5K_MODE_11A); - else - ath5k_setcurmode(sc, AR5K_MODE_11B); - /* * Allocate tx+rx descriptors and populate the lists. 
*/ @@ -2946,13 +2918,13 @@ ath5k_deinit_softc(struct ath5k_softc *sc) bool ath_any_vif_assoc(struct ath5k_softc *sc) { - struct ath_vif_iter_data iter_data; + struct ath5k_vif_iter_data iter_data; iter_data.hw_macaddr = NULL; iter_data.any_assoc = false; iter_data.need_set_hw_addr = false; iter_data.found_active = true; - ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter, + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter, &iter_data); return iter_data.any_assoc; } diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h index 6d511476e4d2..978f1f4ac2f3 100644 --- a/drivers/net/wireless/ath/ath5k/base.h +++ b/drivers/net/wireless/ath/ath5k/base.h @@ -86,6 +86,7 @@ struct ath5k_txq { spinlock_t lock; /* lock on q and link */ bool setup; int txq_len; /* number of queued buffers */ + int txq_max; /* max allowed num of queued buffers */ bool txq_poll_mark; unsigned int txq_stuck; /* informational counter */ }; @@ -183,8 +184,6 @@ struct ath5k_softc { enum nl80211_iftype opmode; struct ath5k_hw *ah; /* Atheros HW */ - struct ieee80211_supported_band *curband; - #ifdef CONFIG_ATH5K_DEBUG struct ath5k_dbg_info debug; /* debug info */ #endif /* CONFIG_ATH5K_DEBUG */ @@ -202,7 +201,6 @@ struct ath5k_softc { #define ATH_STAT_STARTED 4 /* opened & irqs enabled */ unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ - unsigned int curmode; /* current phy mode */ struct ieee80211_channel *curchan; /* current h/w channel */ u16 nvifs; @@ -262,6 +260,19 @@ struct ath5k_softc { struct survey_info survey; /* collected survey info */ }; +struct ath5k_vif_iter_data { + const u8 *hw_macaddr; + u8 mask[ETH_ALEN]; + u8 active_mac[ETH_ALEN]; /* first active MAC */ + bool need_set_hw_addr; + bool found_active; + bool any_assoc; + enum nl80211_iftype opmode; + int n_stas; +}; +void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif); + + #define ath5k_hw_hasbssidmask(_ah) \ (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0) #define ath5k_hw_hasveol(_ah) \ diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c index 31cad80e9b01..f77e8a703c5c 100644 --- a/drivers/net/wireless/ath/ath5k/caps.c +++ b/drivers/net/wireless/ath/ath5k/caps.c @@ -32,23 +32,24 @@ */ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) { + struct ath5k_capabilities *caps = &ah->ah_capabilities; u16 ee_header; /* Capabilities stored in the EEPROM */ - ee_header = ah->ah_capabilities.cap_eeprom.ee_header; + ee_header = caps->cap_eeprom.ee_header; if (ah->ah_version == AR5K_AR5210) { /* * Set radio capabilities * (The AR5110 only supports the middle 5GHz band) */ - ah->ah_capabilities.cap_range.range_5ghz_min = 5120; - ah->ah_capabilities.cap_range.range_5ghz_max = 5430; - ah->ah_capabilities.cap_range.range_2ghz_min = 0; - ah->ah_capabilities.cap_range.range_2ghz_max = 0; + caps->cap_range.range_5ghz_min = 5120; + caps->cap_range.range_5ghz_max = 5430; + caps->cap_range.range_2ghz_min = 0; + caps->cap_range.range_2ghz_max = 0; /* Set supported modes */ - __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode); + __set_bit(AR5K_MODE_11A, caps->cap_mode); } else { /* * XXX The tranceiver supports frequencies from 4920 to 6100GHz @@ -56,9 +57,8 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) * XXX current ieee80211 implementation because the IEEE * XXX channel mapping does not support negative channel * XXX numbers (2312MHz is channel -19). 
Of course, this - * XXX doesn't matter because these channels are out of range - * XXX but some regulation domains like MKK (Japan) will - * XXX support frequencies somewhere around 4.8GHz. + * XXX doesn't matter because these channels are out of the + * XXX legal range. */ /* @@ -66,13 +66,14 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) */ if (AR5K_EEPROM_HDR_11A(ee_header)) { - /* 4920 */ - ah->ah_capabilities.cap_range.range_5ghz_min = 5005; - ah->ah_capabilities.cap_range.range_5ghz_max = 6100; + if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain)) + caps->cap_range.range_5ghz_min = 4920; + else + caps->cap_range.range_5ghz_min = 5005; + caps->cap_range.range_5ghz_max = 6100; /* Set supported modes */ - __set_bit(AR5K_MODE_11A, - ah->ah_capabilities.cap_mode); + __set_bit(AR5K_MODE_11A, caps->cap_mode); } /* Enable 802.11b if a 2GHz capable radio (2111/5112) is @@ -81,32 +82,29 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah) (AR5K_EEPROM_HDR_11G(ee_header) && ah->ah_version != AR5K_AR5211)) { /* 2312 */ - ah->ah_capabilities.cap_range.range_2ghz_min = 2412; - ah->ah_capabilities.cap_range.range_2ghz_max = 2732; + caps->cap_range.range_2ghz_min = 2412; + caps->cap_range.range_2ghz_max = 2732; if (AR5K_EEPROM_HDR_11B(ee_header)) - __set_bit(AR5K_MODE_11B, - ah->ah_capabilities.cap_mode); + __set_bit(AR5K_MODE_11B, caps->cap_mode); if (AR5K_EEPROM_HDR_11G(ee_header) && ah->ah_version != AR5K_AR5211) - __set_bit(AR5K_MODE_11G, - ah->ah_capabilities.cap_mode); + __set_bit(AR5K_MODE_11G, caps->cap_mode); } } /* Set number of supported TX queues */ if (ah->ah_version == AR5K_AR5210) - ah->ah_capabilities.cap_queues.q_tx_num = - AR5K_NUM_TX_QUEUES_NOQCU; + caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU; else - ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; + caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; /* newer hardware has PHY error counters */ if (ah->ah_mac_srev >= AR5K_SREV_AR5213A) - ah->ah_capabilities.cap_has_phyerr_counters = true; + caps->cap_has_phyerr_counters = true; else - ah->ah_capabilities.cap_has_phyerr_counters = false; + caps->cap_has_phyerr_counters = false; return 0; } diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index d2f84d76bb07..0230f30e9e9a 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -308,8 +308,6 @@ static const struct { { ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" }, { ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" }, { ATH5K_DEBUG_LED, "led", "LED management" }, - { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" }, - { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, { ATH5K_DEBUG_DMA, "dma", "dma start/stop" }, { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, @@ -1036,24 +1034,6 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) } void -ath5k_debug_dump_skb(struct ath5k_softc *sc, - struct sk_buff *skb, const char *prefix, int tx) -{ - char buf[16]; - - if (likely(!((tx && (sc->debug.level & ATH5K_DEBUG_DUMP_TX)) || - (!tx && (sc->debug.level & ATH5K_DEBUG_DUMP_RX))))) - return; - - snprintf(buf, sizeof(buf), "%s %s", wiphy_name(sc->hw->wiphy), prefix); - - print_hex_dump_bytes(buf, DUMP_PREFIX_NONE, skb->data, - min(200U, skb->len)); - - printk(KERN_DEBUG "\n"); -} - -void ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) { struct ath5k_desc *ds = bf->desc; diff --git 
a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h index 3e34428d5126..b0355aef68d3 100644 --- a/drivers/net/wireless/ath/ath5k/debug.h +++ b/drivers/net/wireless/ath/ath5k/debug.h @@ -116,8 +116,6 @@ enum ath5k_debug_level { ATH5K_DEBUG_CALIBRATE = 0x00000020, ATH5K_DEBUG_TXPOWER = 0x00000040, ATH5K_DEBUG_LED = 0x00000080, - ATH5K_DEBUG_DUMP_RX = 0x00000100, - ATH5K_DEBUG_DUMP_TX = 0x00000200, ATH5K_DEBUG_DUMPBANDS = 0x00000400, ATH5K_DEBUG_DMA = 0x00000800, ATH5K_DEBUG_ANI = 0x00002000, @@ -152,10 +150,6 @@ void ath5k_debug_dump_bands(struct ath5k_softc *sc); void -ath5k_debug_dump_skb(struct ath5k_softc *sc, - struct sk_buff *skb, const char *prefix, int tx); - -void ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf); #else /* no debugging */ @@ -182,10 +176,6 @@ static inline void ath5k_debug_dump_bands(struct ath5k_softc *sc) {} static inline void -ath5k_debug_dump_skb(struct ath5k_softc *sc, - struct sk_buff *skb, const char *prefix, int tx) {} - -static inline void ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {} #endif /* ifdef CONFIG_ATH5K_DEBUG */ diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c index 80e625608bac..b6561f785c6e 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/drivers/net/wireless/ath/ath5k/eeprom.c @@ -72,7 +72,6 @@ static int ath5k_eeprom_init_header(struct ath5k_hw *ah) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; - int ret; u16 val; u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX; @@ -192,7 +191,7 @@ static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset, struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 o = *offset; u16 val; - int ret, i = 0; + int i = 0; AR5K_EEPROM_READ(o++, val); ee->ee_switch_settling[mode] = (val >> 8) & 0x7f; @@ -252,7 +251,6 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset, struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; u32 o = *offset; u16 val; - int ret; ee->ee_n_piers[mode] = 0; AR5K_EEPROM_READ(o++, val); @@ -515,7 +513,6 @@ ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max, int o = *offset; int i = 0; u8 freq1, freq2; - int ret; u16 val; ee->ee_n_piers[mode] = 0; @@ -551,7 +548,7 @@ ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset) { struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a; - int i, ret; + int i; u16 val; u8 mask; @@ -970,7 +967,6 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode) u32 offset; u8 i, c; u16 val; - int ret; u8 pd_gains = 0; /* Count how many curves we have and @@ -1228,7 +1224,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode) struct ath5k_chan_pcal_info *chinfo; u8 *pdgain_idx = ee->ee_pdc_to_idx[mode]; u32 offset; - int idx, i, ret; + int idx, i; u16 val; u8 pd_gains = 0; @@ -1419,7 +1415,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) u8 *rate_target_pwr_num; u32 offset; u16 val; - int ret, i; + int i; offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1); rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode]; @@ -1593,7 +1589,7 @@ ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) struct ath5k_edge_power *rep; unsigned int fmask, pmask; unsigned int ctl_mode; - int ret, i, j; + int i, j; u32 offset; u16 val; @@ -1733,16 +1729,12 @@ int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac) u8 mac_d[ETH_ALEN] = {}; u32 total, 
offset; u16 data; - int octet, ret; + int octet; - ret = ath5k_hw_nvram_read(ah, 0x20, &data); - if (ret) - return ret; + AR5K_EEPROM_READ(0x20, data); for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) { - ret = ath5k_hw_nvram_read(ah, offset, &data); - if (ret) - return ret; + AR5K_EEPROM_READ(offset, data); total += data; mac_d[octet + 1] = data & 0xff; diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h index 7c09e150dbdc..6511c27d938e 100644 --- a/drivers/net/wireless/ath/ath5k/eeprom.h +++ b/drivers/net/wireless/ath/ath5k/eeprom.h @@ -241,9 +241,8 @@ enum ath5k_eeprom_freq_bands{ #define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250 #define AR5K_EEPROM_READ(_o, _v) do { \ - ret = ath5k_hw_nvram_read(ah, (_o), &(_v)); \ - if (ret) \ - return ret; \ + if (!ath5k_hw_nvram_read(ah, (_o), &(_v))) \ + return -EIO; \ } while (0) #define AR5K_EEPROM_READ_HDR(_o, _v) \ @@ -269,29 +268,6 @@ enum ath5k_ctl_mode { AR5K_CTL_MODE_M = 15, }; -/* Default CTL ids for the 3 main reg domains. - * Atheros only uses these by default but vendors - * can have up to 32 different CTLs for different - * scenarios. Note that theese values are ORed with - * the mode id (above) so we can have up to 24 CTL - * datasets out of these 3 main regdomains. That leaves - * 8 ids that can be used by vendors and since 0x20 is - * missing from HAL sources i guess this is the set of - * custom CTLs vendors can use. */ -#define AR5K_CTL_FCC 0x10 -#define AR5K_CTL_CUSTOM 0x20 -#define AR5K_CTL_ETSI 0x30 -#define AR5K_CTL_MKK 0x40 - -/* Indicates a CTL with only mode set and - * no reg domain mapping, such CTLs are used - * for world roaming domains or simply when - * a reg domain is not set */ -#define AR5K_CTL_NO_REGDOMAIN 0xf0 - -/* Indicates an empty (invalid) CTL */ -#define AR5K_CTL_NO_CTL 0xff - /* Per channel calibration data, used for power table setup */ struct ath5k_chan_pcal_info_rf5111 { /* Power levels in half dbm units diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c index d76d68c99f72..9be29b728b1c 100644 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ -48,28 +48,11 @@ extern int ath5k_modparam_nohwcrypt; -/* functions used from base.c */ -void set_beacon_filter(struct ieee80211_hw *hw, bool enable); -bool ath_any_vif_assoc(struct ath5k_softc *sc); -int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, - struct ath5k_txq *txq); -int ath5k_init_hw(struct ath5k_softc *sc); -int ath5k_stop_hw(struct ath5k_softc *sc); -void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif); -void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc, - struct ieee80211_vif *vif); -int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan); -void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); -int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -void ath5k_beacon_config(struct ath5k_softc *sc); -void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf); -void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf); - /********************\ * Mac80211 functions * \********************/ -static int +static void ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ath5k_softc *sc = hw->priv; @@ -77,10 +60,10 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) { 
dev_kfree_skb_any(skb); - return 0; + return; } - return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]); + ath5k_tx_queue(hw, skb, &sc->txqs[qnum]); } @@ -175,8 +158,7 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) memcpy(&avf->lladdr, vif->addr, ETH_ALEN); - ath5k_mode_setup(sc, vif); - + ath5k_update_bssid_mask_and_opmode(sc, vif); ret = 0; end: mutex_unlock(&sc->lock); @@ -226,6 +208,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed) struct ath5k_hw *ah = sc->ah; struct ieee80211_conf *conf = &hw->conf; int ret = 0; + int i; mutex_lock(&sc->lock); @@ -243,6 +226,14 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed) ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); } + if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { + ah->ah_retry_long = conf->long_frame_max_tx_count; + ah->ah_retry_short = conf->short_frame_max_tx_count; + + for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) + ath5k_hw_set_tx_retry_limits(ah, i); + } + /* TODO: * 1) Move this on config_interface and handle each case * separately eg. when we have only one STA vif, use @@ -389,6 +380,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; u32 mfilt[2], rfilt; + struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */ mutex_lock(&sc->lock); @@ -462,6 +454,21 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, break; } + iter_data.hw_macaddr = NULL; + iter_data.n_stas = 0; + iter_data.need_set_hw_addr = false; + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter, + &iter_data); + + /* Set up RX Filter */ + if (iter_data.n_stas > 1) { + /* If you have multiple STA interfaces connected to + * different APs, ARPs are not received (most of the time?) + * Enabling PROMISC appears to fix that probem. 
+ */ + rfilt |= AR5K_RX_FILTER_PROM; + } + /* Set filters */ ath5k_hw_set_rx_filter(ah, rfilt); @@ -733,6 +740,47 @@ ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) } +static void ath5k_get_ringparam(struct ieee80211_hw *hw, + u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max) +{ + struct ath5k_softc *sc = hw->priv; + + *tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max; + + *tx_max = ATH5K_TXQ_LEN_MAX; + *rx = *rx_max = ATH_RXBUF; +} + + +static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx) +{ + struct ath5k_softc *sc = hw->priv; + u16 qnum; + + /* only support setting tx ring size for now */ + if (rx != ATH_RXBUF) + return -EINVAL; + + /* restrict tx ring size min/max */ + if (!tx || tx > ATH5K_TXQ_LEN_MAX) + return -EINVAL; + + for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) { + if (!sc->txqs[qnum].setup) + continue; + if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN || + sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX) + continue; + + sc->txqs[qnum].txq_max = tx; + if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max) + ieee80211_stop_queue(hw, sc->txqs[qnum].qnum); + } + + return 0; +} + + const struct ieee80211_ops ath5k_hw_ops = { .tx = ath5k_tx, .start = ath5k_start, @@ -771,4 +819,6 @@ const struct ieee80211_ops ath5k_hw_ops = { /* .napi_poll = not implemented */ .set_antenna = ath5k_set_antenna, .get_antenna = ath5k_get_antenna, + .set_ringparam = ath5k_set_ringparam, + .get_ringparam = ath5k_get_ringparam, }; diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index 7f8c5b0e9d2a..66598a0d1df0 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -69,7 +69,8 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz) /* * Read from eeprom */ -bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) +static bool +ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) { struct ath5k_hw *ah = (struct ath5k_hw *) common->ah; u32 status, timeout; @@ -90,15 +91,15 @@ bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data) status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS); if (status & AR5K_EEPROM_STAT_RDDONE) { if (status & AR5K_EEPROM_STAT_RDERR) - return -EIO; + return false; *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) & 0xffff); - return 0; + return true; } udelay(15); } - return -ETIMEDOUT; + return false; } int ath5k_hw_read_srev(struct ath5k_hw *ah) diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c index 2c9c9e793d4e..3343fb9e4940 100644 --- a/drivers/net/wireless/ath/ath5k/qcu.c +++ b/drivers/net/wireless/ath/ath5k/qcu.c @@ -228,24 +228,9 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, /* * Set tx retry limits on DCU */ -static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, - unsigned int queue) +void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, + unsigned int queue) { - u32 retry_lg, retry_sh; - - /* - * Calculate and set retry limits - */ - if (ah->ah_software_retry) { - /* XXX Need to test this */ - retry_lg = ah->ah_limit_tx_retries; - retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ? 
- AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg; - } else { - retry_lg = AR5K_INIT_LG_RETRY; - retry_sh = AR5K_INIT_SH_RETRY; - } - /* Single data queue on AR5210 */ if (ah->ah_version == AR5K_AR5210) { struct ath5k_txq_info *tq = &ah->ah_txq[queue]; @@ -255,25 +240,26 @@ static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, ath5k_hw_reg_write(ah, (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S) - | AR5K_REG_SM(AR5K_INIT_SLG_RETRY, - AR5K_NODCU_RETRY_LMT_SLG_RETRY) - | AR5K_REG_SM(AR5K_INIT_SSH_RETRY, - AR5K_NODCU_RETRY_LMT_SSH_RETRY) - | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY) - | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY), + | AR5K_REG_SM(ah->ah_retry_long, + AR5K_NODCU_RETRY_LMT_SLG_RETRY) + | AR5K_REG_SM(ah->ah_retry_short, + AR5K_NODCU_RETRY_LMT_SSH_RETRY) + | AR5K_REG_SM(ah->ah_retry_long, + AR5K_NODCU_RETRY_LMT_LG_RETRY) + | AR5K_REG_SM(ah->ah_retry_short, + AR5K_NODCU_RETRY_LMT_SH_RETRY), AR5K_NODCU_RETRY_LMT); /* DCU on AR5211+ */ } else { ath5k_hw_reg_write(ah, - AR5K_REG_SM(AR5K_INIT_SLG_RETRY, - AR5K_DCU_RETRY_LMT_SLG_RETRY) | - AR5K_REG_SM(AR5K_INIT_SSH_RETRY, - AR5K_DCU_RETRY_LMT_SSH_RETRY) | - AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) | - AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY), + AR5K_REG_SM(ah->ah_retry_long, + AR5K_DCU_RETRY_LMT_RTS) + | AR5K_REG_SM(ah->ah_retry_long, + AR5K_DCU_RETRY_LMT_STA_RTS) + | AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short), + AR5K_DCU_RETRY_LMT_STA_DATA), AR5K_QUEUE_DFS_RETRY_LIMIT(queue)); } - return; } /** diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h index fd14b9103951..e1c9abd8c879 100644 --- a/drivers/net/wireless/ath/ath5k/reg.h +++ b/drivers/net/wireless/ath/ath5k/reg.h @@ -686,16 +686,15 @@ /* * DCU retry limit registers + * all these fields don't allow zero values */ #define AR5K_DCU_RETRY_LMT_BASE 0x1080 /* Register Address -Queue0 DCU_RETRY_LMT */ -#define AR5K_DCU_RETRY_LMT_SH_RETRY 0x0000000f /* Short retry limit mask */ -#define AR5K_DCU_RETRY_LMT_SH_RETRY_S 0 -#define AR5K_DCU_RETRY_LMT_LG_RETRY 0x000000f0 /* Long retry limit mask */ -#define AR5K_DCU_RETRY_LMT_LG_RETRY_S 4 -#define AR5K_DCU_RETRY_LMT_SSH_RETRY 0x00003f00 /* Station short retry limit mask (?) */ -#define AR5K_DCU_RETRY_LMT_SSH_RETRY_S 8 -#define AR5K_DCU_RETRY_LMT_SLG_RETRY 0x000fc000 /* Station long retry limit mask (?) */ -#define AR5K_DCU_RETRY_LMT_SLG_RETRY_S 14 +#define AR5K_DCU_RETRY_LMT_RTS 0x0000000f /* RTS failure limit. Transmission fails if no CTS is received for this number of times */ +#define AR5K_DCU_RETRY_LMT_RTS_S 0 +#define AR5K_DCU_RETRY_LMT_STA_RTS 0x00003f00 /* STA RTS failure limit. If exceeded CW reset */ +#define AR5K_DCU_RETRY_LMT_STA_RTS_S 8 +#define AR5K_DCU_RETRY_LMT_STA_DATA 0x000fc000 /* STA data failure limit. If exceeded CW reset. */ +#define AR5K_DCU_RETRY_LMT_STA_DATA_S 14 #define AR5K_QUEUE_DFS_RETRY_LIMIT(_q) AR5K_QUEUE_REG(AR5K_DCU_RETRY_LMT_BASE, _q) /* diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h new file mode 100644 index 000000000000..2de68adb6240 --- /dev/null +++ b/drivers/net/wireless/ath/ath5k/trace.h @@ -0,0 +1,107 @@ +#if !defined(__TRACE_ATH5K_H) || defined(TRACE_HEADER_MULTI_READ) +#define __TRACE_ATH5K_H + +#include <linux/tracepoint.h> +#include "base.h" + +#ifndef CONFIG_ATH5K_TRACER +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) 
\ +static inline void trace_ ## name(proto) {} +#endif + +struct sk_buff; + +#define PRIV_ENTRY __field(struct ath5k_softc *, priv) +#define PRIV_ASSIGN __entry->priv = priv + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ath5k + +TRACE_EVENT(ath5k_rx, + TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb), + TP_ARGS(priv, skb), + TP_STRUCT__entry( + PRIV_ENTRY + __field(unsigned long, skbaddr) + __dynamic_array(u8, frame, skb->len) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->skbaddr = (unsigned long) skb; + memcpy(__get_dynamic_array(frame), skb->data, skb->len); + ), + TP_printk( + "[%p] RX skb=%lx", __entry->priv, __entry->skbaddr + ) +); + +TRACE_EVENT(ath5k_tx, + TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb, + struct ath5k_txq *q), + + TP_ARGS(priv, skb, q), + + TP_STRUCT__entry( + PRIV_ENTRY + __field(unsigned long, skbaddr) + __field(u8, qnum) + __dynamic_array(u8, frame, skb->len) + ), + + TP_fast_assign( + PRIV_ASSIGN; + __entry->skbaddr = (unsigned long) skb; + __entry->qnum = (u8) q->qnum; + memcpy(__get_dynamic_array(frame), skb->data, skb->len); + ), + + TP_printk( + "[%p] TX skb=%lx q=%d", __entry->priv, __entry->skbaddr, + __entry->qnum + ) +); + +TRACE_EVENT(ath5k_tx_complete, + TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb, + struct ath5k_txq *q, struct ath5k_tx_status *ts), + + TP_ARGS(priv, skb, q, ts), + + TP_STRUCT__entry( + PRIV_ENTRY + __field(unsigned long, skbaddr) + __field(u8, qnum) + __field(u8, ts_status) + __field(s8, ts_rssi) + __field(u8, ts_antenna) + ), + + TP_fast_assign( + PRIV_ASSIGN; + __entry->skbaddr = (unsigned long) skb; + __entry->qnum = (u8) q->qnum; + __entry->ts_status = ts->ts_status; + __entry->ts_rssi = ts->ts_rssi; + __entry->ts_antenna = ts->ts_antenna; + ), + + TP_printk( + "[%p] TX end skb=%lx q=%d stat=%x rssi=%d ant=%x", + __entry->priv, __entry->skbaddr, __entry->qnum, + __entry->ts_status, __entry->ts_rssi, __entry->ts_antenna + ) +); + +#endif /* __TRACE_ATH5K_H */ + +#ifdef CONFIG_ATH5K_TRACER + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +#include <trace/define_trace.h> + +#endif diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile index aca01621c205..4d66ca8042eb 100644 --- a/drivers/net/wireless/ath/ath9k/Makefile +++ b/drivers/net/wireless/ath/ath9k/Makefile @@ -4,7 +4,6 @@ ath9k-y += beacon.o \ main.o \ recv.o \ xmit.o \ - virtual.o \ ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o ath9k-$(CONFIG_PCI) += pci.o diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 25a6e4417cdb..9cb0efa9b4c0 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -54,7 +54,6 @@ static struct ath_bus_ops ath_ahb_bus_ops = { static int ath_ahb_probe(struct platform_device *pdev) { void __iomem *mem; - struct ath_wiphy *aphy; struct ath_softc *sc; struct ieee80211_hw *hw; struct resource *res; @@ -76,7 +75,7 @@ static int ath_ahb_probe(struct platform_device *pdev) goto err_out; } - mem = ioremap_nocache(res->start, res->end - res->start + 1); + mem = ioremap_nocache(res->start, resource_size(res)); if (mem == NULL) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; @@ -92,8 +91,7 @@ static int ath_ahb_probe(struct platform_device *pdev) irq = res->start; - hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + - sizeof(struct ath_softc), &ath9k_ops); + hw = ieee80211_alloc_hw(sizeof(struct 
ath_softc), &ath9k_ops); if (hw == NULL) { dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); ret = -ENOMEM; @@ -103,11 +101,7 @@ static int ath_ahb_probe(struct platform_device *pdev) SET_IEEE80211_DEV(hw, &pdev->dev); platform_set_drvdata(pdev, hw); - aphy = hw->priv; - sc = (struct ath_softc *) (aphy + 1); - aphy->sc = sc; - aphy->hw = hw; - sc->pri_wiphy = aphy; + sc = hw->priv; sc->hw = hw; sc->dev = &pdev->dev; sc->mem = mem; @@ -151,8 +145,7 @@ static int ath_ahb_remove(struct platform_device *pdev) struct ieee80211_hw *hw = platform_get_drvdata(pdev); if (hw) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; void __iomem *mem = sc->mem; ath9k_deinit_device(sc); diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 5e300bd3d264..76388c6d6692 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -805,7 +805,10 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); - if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) { + if (AR_SREV_9271(ah)) { + if (!ar9285_hw_cl_cal(ah, chan)) + return false; + } else if (AR_SREV_9285_12_OR_LATER(ah)) { if (!ar9285_hw_clc(ah, chan)) return false; } else { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 4819747fa4c3..4a9271802991 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3673,7 +3673,7 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) return; reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) | - (7 << 14) | (6 << 17) | (1 << 20) | + (2 << 14) | (6 << 17) | (1 << 20) | (3 << 24) | (1 << 28); REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set); @@ -3959,19 +3959,19 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) { #define POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) /* make sure forced gain is not set */ - REG_WRITE(ah, 0xa458, 0); + REG_WRITE(ah, AR_PHY_TX_FORCED_GAIN, 0); /* Write the OFDM power per rate set */ /* 6 (LSB), 9, 12, 18 (MSB) */ - REG_WRITE(ah, 0xa3c0, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(0), POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0)); /* 24 (LSB), 36, 48, 54 (MSB) */ - REG_WRITE(ah, 0xa3c4, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(1), POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) | @@ -3980,14 +3980,14 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) /* Write the CCK power per rate set */ /* 1L (LSB), reserved, 2L, 2S (MSB) */ - REG_WRITE(ah, 0xa3c8, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(2), POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) | /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */ POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)); /* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */ - REG_WRITE(ah, 0xa3cc, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(3), POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) | POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) | @@ -3997,7 +3997,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) /* Write the HT20 power per rate set 
*/ /* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */ - REG_WRITE(ah, 0xa3d0, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(4), POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) | POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) | POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) | @@ -4005,7 +4005,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) ); /* 6 (LSB), 7, 12, 13 (MSB) */ - REG_WRITE(ah, 0xa3d4, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(5), POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) | POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) | POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) | @@ -4013,7 +4013,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) ); /* 14 (LSB), 15, 20, 21 */ - REG_WRITE(ah, 0xa3e4, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(9), POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) | POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) | POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) | @@ -4023,7 +4023,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) /* Mixed HT20 and HT40 rates */ /* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */ - REG_WRITE(ah, 0xa3e8, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(10), POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) | POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) | POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) | @@ -4035,7 +4035,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) * correct PAR difference between HT40 and HT20/LEGACY * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */ - REG_WRITE(ah, 0xa3d8, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(6), POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) | POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) | POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) | @@ -4043,7 +4043,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) ); /* 6 (LSB), 7, 12, 13 (MSB) */ - REG_WRITE(ah, 0xa3dc, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(7), POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) | POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) | POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) | @@ -4051,7 +4051,7 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray) ); /* 14 (LSB), 15, 20, 21 */ - REG_WRITE(ah, 0xa3ec, + REG_WRITE(ah, AR_PHY_POWER_TX_RATE(11), POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) | POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) | POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) | diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 06fb2c850535..7f5de6e4448b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -28,7 +28,67 @@ */ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) { - if (AR_SREV_9485(ah)) { + if (AR_SREV_9485_11(ah)) { + /* mac */ + INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); + INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], + ar9485_1_1_mac_core, + ARRAY_SIZE(ar9485_1_1_mac_core), 2); + INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST], + ar9485_1_1_mac_postamble, + ARRAY_SIZE(ar9485_1_1_mac_postamble), 5); + + /* bb */ + INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1, + ARRAY_SIZE(ar9485_1_1), 2); + INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE], + ar9485_1_1_baseband_core, + ARRAY_SIZE(ar9485_1_1_baseband_core), 2); + INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST], + ar9485_1_1_baseband_postamble, + ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5); + + /* radio */ + INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0); + INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE], + ar9485_1_1_radio_core, + ARRAY_SIZE(ar9485_1_1_radio_core), 2); + 
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], + ar9485_1_1_radio_postamble, + ARRAY_SIZE(ar9485_1_1_radio_postamble), 2); + + /* soc */ + INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE], + ar9485_1_1_soc_preamble, + ARRAY_SIZE(ar9485_1_1_soc_preamble), 2); + INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0); + INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0); + + /* rx/tx gain */ + INIT_INI_ARRAY(&ah->iniModesRxGain, + ar9485_common_rx_gain_1_1, + ARRAY_SIZE(ar9485_common_rx_gain_1_1), 2); + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9485_modes_lowest_ob_db_tx_gain_1_1, + ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1), + 5); + + /* Load PCIE SERDES settings from INI */ + + /* Awake Setting */ + + INIT_INI_ARRAY(&ah->iniPcieSerdes, + ar9485_1_1_pcie_phy_clkreq_disable_L1, + ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1), + 2); + + /* Sleep Setting */ + + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, + ar9485_1_1_pcie_phy_clkreq_disable_L1, + ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1), + 2); + } else if (AR_SREV_9485(ah)) { /* mac */ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], @@ -85,8 +145,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah) /* Sleep Setting */ INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, - ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1, - ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1), + ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1, + ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1), 2); } else { /* mac */ @@ -163,7 +223,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah) switch (ar9003_hw_get_tx_gain_idx(ah)) { case 0: default: - if (AR_SREV_9485(ah)) + if (AR_SREV_9485_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9485_modes_lowest_ob_db_tx_gain_1_1, + ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1), + 5); + else if (AR_SREV_9485(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_lowest_ob_db_tx_gain_1_0, ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), @@ -175,10 +240,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah) 5); break; case 1: - if (AR_SREV_9485(ah)) + if (AR_SREV_9485_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9485Modes_high_ob_db_tx_gain_1_1, + ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1), + 5); + else if (AR_SREV_9485(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_high_ob_db_tx_gain_1_0, - ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), + ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_0), 5); else INIT_INI_ARRAY(&ah->iniModesTxGain, @@ -187,10 +257,15 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah) 5); break; case 2: - if (AR_SREV_9485(ah)) + if (AR_SREV_9485_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9485Modes_low_ob_db_tx_gain_1_1, + ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1), + 5); + else if (AR_SREV_9485(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_low_ob_db_tx_gain_1_0, - ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0), + ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_0), 5); else INIT_INI_ARRAY(&ah->iniModesTxGain, @@ -199,7 +274,12 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah) 5); break; case 3: - if (AR_SREV_9485(ah)) + if (AR_SREV_9485_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + ar9485Modes_high_power_tx_gain_1_1, + ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1), + 5); + else if (AR_SREV_9485(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_high_power_tx_gain_1_0, ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0), @@ -218,7 +298,12 @@ static void 
ar9003_rx_gain_table_apply(struct ath_hw *ah)
 	switch (ar9003_hw_get_rx_gain_idx(ah)) {
 	case 0:
 	default:
-		if (AR_SREV_9485(ah))
+		if (AR_SREV_9485_11(ah))
+			INIT_INI_ARRAY(&ah->iniModesRxGain,
+				ar9485_common_rx_gain_1_1,
+				ARRAY_SIZE(ar9485_common_rx_gain_1_1),
+				2);
+		else if (AR_SREV_9485(ah))
 			INIT_INI_ARRAY(&ah->iniModesRxGain,
 				ar9485Common_rx_gain_1_0,
 				ARRAY_SIZE(ar9485Common_rx_gain_1_0),
@@ -230,7 +315,12 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
 				2);
 		break;
 	case 1:
-		if (AR_SREV_9485(ah))
+		if (AR_SREV_9485_11(ah))
+			INIT_INI_ARRAY(&ah->iniModesRxGain,
+				ar9485Common_wo_xlna_rx_gain_1_1,
+				ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
+				2);
+		else if (AR_SREV_9485(ah))
 			INIT_INI_ARRAY(&ah->iniModesRxGain,
 				ar9485Common_wo_xlna_rx_gain_1_0,
 				ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 4ceddbbdfcee..038a0cbfc6e7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -615,7 +615,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
 		 */
 		if (rxsp->status11 & AR_CRCErr)
 			rxs->rs_status |= ATH9K_RXERR_CRC;
-		if (rxsp->status11 & AR_PHYErr) {
+		else if (rxsp->status11 & AR_PHYErr) {
 			phyerr = MS(rxsp->status11, AR_PHYErrCode);
 			/*
 			 * If we reach a point here where AR_PostDelimCRCErr is
@@ -638,11 +638,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
 				rxs->rs_phyerr = phyerr;
 			}
-		}
-		if (rxsp->status11 & AR_DecryptCRCErr)
+		} else if (rxsp->status11 & AR_DecryptCRCErr)
 			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
-		if (rxsp->status11 & AR_MichaelErr)
+		else if (rxsp->status11 & AR_MichaelErr)
 			rxs->rs_status |= ATH9K_RXERR_MIC;
+
 		if (rxsp->status11 & AR_KeyMiss)
 			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
 	}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 8d60f4f09acc..eb250d6b8038 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1020,28 +1020,29 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
 static void ar9003_hw_do_getnf(struct ath_hw *ah,
 			      int16_t nfarray[NUM_NF_READINGS])
 {
-	int16_t nf;
-
-	nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
-	nfarray[0] = sign_extend32(nf, 8);
-
-	nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
-	nfarray[1] = sign_extend32(nf, 8);
+#define AR_PHY_CH_MINCCA_PWR	0x1FF00000
+#define AR_PHY_CH_MINCCA_PWR_S	20
+#define AR_PHY_CH_EXT_MINCCA_PWR	0x01FF0000
+#define AR_PHY_CH_EXT_MINCCA_PWR_S	16
-	nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
-	nfarray[2] = sign_extend32(nf, 8);
-
-	if (!IS_CHAN_HT40(ah->curchan))
-		return;
+	int16_t nf;
+	int i;
-	nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
-	nfarray[3] = sign_extend32(nf, 8);
+	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+		if (ah->rxchainmask & BIT(i)) {
+			nf = MS(REG_READ(ah, ah->nf_regs[i]),
+					AR_PHY_CH_MINCCA_PWR);
+			nfarray[i] = sign_extend32(nf, 8);
-	nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
-	nfarray[4] = sign_extend32(nf, 8);
+			if (IS_CHAN_HT40(ah->curchan)) {
+				u8 ext_idx = AR9300_MAX_CHAINS + i;
-	nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
-	nfarray[5] = sign_extend32(nf, 8);
+				nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]),
+						AR_PHY_CH_EXT_MINCCA_PWR);
+				nfarray[ext_idx] = sign_extend32(nf, 8);
+			}
+		}
+	}
 }
 static void ar9003_hw_set_nf_limits(struct
ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 59bab6bd8a74..8bdda2cf9dd7 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -486,6 +486,8 @@ #define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac) #define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0) +#define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2)) + #define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0) #define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4) diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 70de3d89a7b5..71cc0a3a29fb 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -667,6 +667,7 @@ static const u32 ar9485_1_0_pcie_phy_clkreq_enable_L1[][2] = { static const u32 ar9485_1_0_soc_preamble[][2] = { /* Addr allmodes */ + {0x00004090, 0x00aa10aa}, {0x000040a4, 0x00a0c9c9}, {0x00007048, 0x00000004}, }; @@ -940,4 +941,1146 @@ static const u32 ar9485_1_0_mac_core[][2] = { {0x000083cc, 0x00000200}, {0x000083d0, 0x000301ff}, }; + +static const u32 ar9485_1_1_mac_core[][2] = { + /* Addr allmodes */ + {0x00000008, 0x00000000}, + {0x00000030, 0x00020085}, + {0x00000034, 0x00000005}, + {0x00000040, 0x00000000}, + {0x00000044, 0x00000000}, + {0x00000048, 0x00000008}, + {0x0000004c, 0x00000010}, + {0x00000050, 0x00000000}, + {0x00001040, 0x002ffc0f}, + {0x00001044, 0x002ffc0f}, + {0x00001048, 0x002ffc0f}, + {0x0000104c, 0x002ffc0f}, + {0x00001050, 0x002ffc0f}, + {0x00001054, 0x002ffc0f}, + {0x00001058, 0x002ffc0f}, + {0x0000105c, 0x002ffc0f}, + {0x00001060, 0x002ffc0f}, + {0x00001064, 0x002ffc0f}, + {0x000010f0, 0x00000100}, + {0x00001270, 0x00000000}, + {0x000012b0, 0x00000000}, + {0x000012f0, 0x00000000}, + {0x0000143c, 0x00000000}, + {0x0000147c, 0x00000000}, + {0x00008000, 0x00000000}, + {0x00008004, 0x00000000}, + {0x00008008, 0x00000000}, + {0x0000800c, 0x00000000}, + {0x00008018, 0x00000000}, + {0x00008020, 0x00000000}, + {0x00008038, 0x00000000}, + {0x0000803c, 0x00000000}, + {0x00008040, 0x00000000}, + {0x00008044, 0x00000000}, + {0x00008048, 0x00000000}, + {0x0000804c, 0xffffffff}, + {0x00008054, 0x00000000}, + {0x00008058, 0x00000000}, + {0x0000805c, 0x000fc78f}, + {0x00008060, 0x0000000f}, + {0x00008064, 0x00000000}, + {0x00008070, 0x00000310}, + {0x00008074, 0x00000020}, + {0x00008078, 0x00000000}, + {0x0000809c, 0x0000000f}, + {0x000080a0, 0x00000000}, + {0x000080a4, 0x02ff0000}, + {0x000080a8, 0x0e070605}, + {0x000080ac, 0x0000000d}, + {0x000080b0, 0x00000000}, + {0x000080b4, 0x00000000}, + {0x000080b8, 0x00000000}, + {0x000080bc, 0x00000000}, + {0x000080c0, 0x2a800000}, + {0x000080c4, 0x06900168}, + {0x000080c8, 0x13881c22}, + {0x000080cc, 0x01f40000}, + {0x000080d0, 0x00252500}, + {0x000080d4, 0x00a00000}, + {0x000080d8, 0x00400000}, + {0x000080dc, 0x00000000}, + {0x000080e0, 0xffffffff}, + {0x000080e4, 0x0000ffff}, + {0x000080e8, 0x3f3f3f3f}, + {0x000080ec, 0x00000000}, + {0x000080f0, 0x00000000}, + {0x000080f4, 0x00000000}, + {0x000080fc, 0x00020000}, + {0x00008100, 0x00000000}, + {0x00008108, 0x00000052}, + {0x0000810c, 0x00000000}, + {0x00008110, 0x00000000}, + {0x00008114, 0x000007ff}, + {0x00008118, 0x000000aa}, + {0x0000811c, 0x00003210}, + {0x00008124, 0x00000000}, + {0x00008128, 0x00000000}, + {0x0000812c, 0x00000000}, + {0x00008130, 0x00000000}, + {0x00008134, 0x00000000}, + {0x00008138, 0x00000000}, + {0x0000813c, 0x0000ffff}, + {0x00008144, 
0xffffffff}, + {0x00008168, 0x00000000}, + {0x0000816c, 0x00000000}, + {0x00008170, 0x18486200}, + {0x00008174, 0x33332210}, + {0x00008178, 0x00000000}, + {0x0000817c, 0x00020000}, + {0x000081c0, 0x00000000}, + {0x000081c4, 0x33332210}, + {0x000081d4, 0x00000000}, + {0x000081ec, 0x00000000}, + {0x000081f0, 0x00000000}, + {0x000081f4, 0x00000000}, + {0x000081f8, 0x00000000}, + {0x000081fc, 0x00000000}, + {0x00008240, 0x00100000}, + {0x00008244, 0x0010f400}, + {0x00008248, 0x00000800}, + {0x0000824c, 0x0001e800}, + {0x00008250, 0x00000000}, + {0x00008254, 0x00000000}, + {0x00008258, 0x00000000}, + {0x0000825c, 0x40000000}, + {0x00008260, 0x00080922}, + {0x00008264, 0x9ca00010}, + {0x00008268, 0xffffffff}, + {0x0000826c, 0x0000ffff}, + {0x00008270, 0x00000000}, + {0x00008274, 0x40000000}, + {0x00008278, 0x003e4180}, + {0x0000827c, 0x00000004}, + {0x00008284, 0x0000002c}, + {0x00008288, 0x0000002c}, + {0x0000828c, 0x000000ff}, + {0x00008294, 0x00000000}, + {0x00008298, 0x00000000}, + {0x0000829c, 0x00000000}, + {0x00008300, 0x00000140}, + {0x00008314, 0x00000000}, + {0x0000831c, 0x0000010d}, + {0x00008328, 0x00000000}, + {0x0000832c, 0x00000007}, + {0x00008330, 0x00000302}, + {0x00008334, 0x00000700}, + {0x00008338, 0x00ff0000}, + {0x0000833c, 0x02400000}, + {0x00008340, 0x000107ff}, + {0x00008344, 0xa248105b}, + {0x00008348, 0x008f0000}, + {0x0000835c, 0x00000000}, + {0x00008360, 0xffffffff}, + {0x00008364, 0xffffffff}, + {0x00008368, 0x00000000}, + {0x00008370, 0x00000000}, + {0x00008374, 0x000000ff}, + {0x00008378, 0x00000000}, + {0x0000837c, 0x00000000}, + {0x00008380, 0xffffffff}, + {0x00008384, 0xffffffff}, + {0x00008390, 0xffffffff}, + {0x00008394, 0xffffffff}, + {0x00008398, 0x00000000}, + {0x0000839c, 0x00000000}, + {0x000083a0, 0x00000000}, + {0x000083a4, 0x0000fa14}, + {0x000083a8, 0x000f0c00}, + {0x000083ac, 0x33332210}, + {0x000083b0, 0x33332210}, + {0x000083b4, 0x33332210}, + {0x000083b8, 0x33332210}, + {0x000083bc, 0x00000000}, + {0x000083c0, 0x00000000}, + {0x000083c4, 0x00000000}, + {0x000083c8, 0x00000000}, + {0x000083cc, 0x00000200}, + {0x000083d0, 0x000301ff}, +}; + +static const u32 ar9485_1_1_baseband_core[][2] = { + /* Addr allmodes */ + {0x00009800, 0xafe68e30}, + {0x00009804, 0xfd14e000}, + {0x00009808, 0x9c0a8f6b}, + {0x0000980c, 0x04800000}, + {0x00009814, 0x9280c00a}, + {0x00009818, 0x00000000}, + {0x0000981c, 0x00020028}, + {0x00009834, 0x5f3ca3de}, + {0x00009838, 0x0108ecff}, + {0x0000983c, 0x14750600}, + {0x00009880, 0x201fff00}, + {0x00009884, 0x00001042}, + {0x000098a4, 0x00200400}, + {0x000098b0, 0x52440bbe}, + {0x000098d0, 0x004b6a8e}, + {0x000098d4, 0x00000820}, + {0x000098dc, 0x00000000}, + {0x000098f0, 0x00000000}, + {0x000098f4, 0x00000000}, + {0x00009c04, 0x00000000}, + {0x00009c08, 0x03200000}, + {0x00009c0c, 0x00000000}, + {0x00009c10, 0x00000000}, + {0x00009c14, 0x00046384}, + {0x00009c18, 0x05b6b440}, + {0x00009c1c, 0x00b6b440}, + {0x00009d00, 0xc080a333}, + {0x00009d04, 0x40206c10}, + {0x00009d08, 0x009c4060}, + {0x00009d0c, 0x1883800a}, + {0x00009d10, 0x01834061}, + {0x00009d14, 0x00c00400}, + {0x00009d18, 0x00000000}, + {0x00009d1c, 0x00000000}, + {0x00009e08, 0x0038233c}, + {0x00009e24, 0x9927b515}, + {0x00009e28, 0x12ef0200}, + {0x00009e30, 0x06336f77}, + {0x00009e34, 0x6af6532f}, + {0x00009e38, 0x0cc80c00}, + {0x00009e40, 0x0d261820}, + {0x00009e4c, 0x00001004}, + {0x00009e50, 0x00ff03f1}, + {0x00009fc0, 0x80be4788}, + {0x00009fc4, 0x0001efb5}, + {0x00009fcc, 0x40000014}, + {0x0000a20c, 0x00000000}, + {0x0000a210, 0x00000000}, + {0x0000a220, 
0x00000000}, + {0x0000a224, 0x00000000}, + {0x0000a228, 0x10002310}, + {0x0000a23c, 0x00000000}, + {0x0000a244, 0x0c000000}, + {0x0000a2a0, 0x00000001}, + {0x0000a2c0, 0x00000001}, + {0x0000a2c8, 0x00000000}, + {0x0000a2cc, 0x18c43433}, + {0x0000a2d4, 0x00000000}, + {0x0000a2dc, 0x00000000}, + {0x0000a2e0, 0x00000000}, + {0x0000a2e4, 0x00000000}, + {0x0000a2e8, 0x00000000}, + {0x0000a2ec, 0x00000000}, + {0x0000a2f0, 0x00000000}, + {0x0000a2f4, 0x00000000}, + {0x0000a2f8, 0x00000000}, + {0x0000a344, 0x00000000}, + {0x0000a34c, 0x00000000}, + {0x0000a350, 0x0000a000}, + {0x0000a364, 0x00000000}, + {0x0000a370, 0x00000000}, + {0x0000a390, 0x00000001}, + {0x0000a394, 0x00000444}, + {0x0000a398, 0x001f0e0f}, + {0x0000a39c, 0x0075393f}, + {0x0000a3a0, 0xb79f6427}, + {0x0000a3a4, 0x000000ff}, + {0x0000a3a8, 0x3b3b3b3b}, + {0x0000a3ac, 0x2f2f2f2f}, + {0x0000a3c0, 0x20202020}, + {0x0000a3c4, 0x22222220}, + {0x0000a3c8, 0x20200020}, + {0x0000a3cc, 0x20202020}, + {0x0000a3d0, 0x20202020}, + {0x0000a3d4, 0x20202020}, + {0x0000a3d8, 0x20202020}, + {0x0000a3dc, 0x20202020}, + {0x0000a3e0, 0x20202020}, + {0x0000a3e4, 0x20202020}, + {0x0000a3e8, 0x20202020}, + {0x0000a3ec, 0x20202020}, + {0x0000a3f0, 0x00000000}, + {0x0000a3f4, 0x00000006}, + {0x0000a3f8, 0x0cdbd380}, + {0x0000a3fc, 0x000f0f01}, + {0x0000a400, 0x8fa91f01}, + {0x0000a404, 0x00000000}, + {0x0000a408, 0x0e79e5c6}, + {0x0000a40c, 0x00820820}, + {0x0000a414, 0x1ce739cf}, + {0x0000a418, 0x2d0019ce}, + {0x0000a41c, 0x1ce739ce}, + {0x0000a420, 0x000001ce}, + {0x0000a424, 0x1ce739ce}, + {0x0000a428, 0x000001ce}, + {0x0000a42c, 0x1ce739ce}, + {0x0000a430, 0x1ce739ce}, + {0x0000a434, 0x00000000}, + {0x0000a438, 0x00001801}, + {0x0000a43c, 0x00000000}, + {0x0000a440, 0x00000000}, + {0x0000a444, 0x00000000}, + {0x0000a448, 0x04000000}, + {0x0000a44c, 0x00000001}, + {0x0000a450, 0x00010000}, + {0x0000a5c4, 0xbfad9d74}, + {0x0000a5c8, 0x0048060a}, + {0x0000a5cc, 0x00000637}, + {0x0000a760, 0x03020100}, + {0x0000a764, 0x09080504}, + {0x0000a768, 0x0d0c0b0a}, + {0x0000a76c, 0x13121110}, + {0x0000a770, 0x31301514}, + {0x0000a774, 0x35343332}, + {0x0000a778, 0x00000036}, + {0x0000a780, 0x00000838}, + {0x0000a7c0, 0x00000000}, + {0x0000a7c4, 0xfffffffc}, + {0x0000a7c8, 0x00000000}, + {0x0000a7cc, 0x00000000}, + {0x0000a7d0, 0x00000000}, + {0x0000a7d4, 0x00000004}, + {0x0000a7dc, 0x00000000}, +}; + +static const u32 ar9485Common_1_1[][2] = { + /* Addr allmodes */ + {0x00007010, 0x00000022}, + {0x00007020, 0x00000000}, + {0x00007034, 0x00000002}, + {0x00007038, 0x000004c2}, +}; + +static const u32 ar9485_1_1_baseband_postamble[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, + {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e}, + {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, + {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881}, + {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, + {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, + {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044}, + {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0}, + {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020}, + {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, + {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e}, + {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, + {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, + 
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, + {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, + {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222}, + {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324}, + {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010}, + {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, + {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0}, + {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, + {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b}, + {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff}, + {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018}, + {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108}, + {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898}, + {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002}, + {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e}, + {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501}, + {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e}, + {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b}, + {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, + {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, + {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, + {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, + {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, +}; + +static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, + {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, + {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, + {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004}, + {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200}, + {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202}, + {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400}, + {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402}, + {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404}, + {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603}, + {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605}, + {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03}, + {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04}, + {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20}, + {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21}, + {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, + {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, + {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, + {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, + {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, + {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, + {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, + {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, + {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, + {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 
0x5e001eeb}, + {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, + {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, +}; + +static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, + {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, + {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, + {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004}, + {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200}, + {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202}, + {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400}, + {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402}, + {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404}, + {0x0000a520, 0x2f001f04, 
0x2f001f04, 0x21000603, 0x21000603}, + {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605}, + {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03}, + {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04}, + {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20}, + {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21}, + {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, + {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, + {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, + {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, + {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, + {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, + {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, + {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, + {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, + {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 
0x00000000}, + {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, + {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, +}; + +static const u32 ar9485_1_1_radio_postamble[][2] = { + /* Addr allmodes */ + {0x0001609c, 0x0b283f31}, + {0x000160ac, 0x24611800}, + {0x000160b0, 0x03284f3e}, + {0x0001610c, 0x00170000}, + {0x00016140, 0x10804008}, +}; + +static const u32 ar9485_1_1_mac_postamble[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, + {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c}, + {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38}, + {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00}, + {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b}, + {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810}, + {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a}, + {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440}, +}; + +static const u32 ar9485_1_1_radio_core[][2] = { + /* Addr allmodes */ + {0x00016000, 0x36db6db6}, + {0x00016004, 0x6db6db40}, + {0x00016008, 0x73800000}, + {0x0001600c, 0x00000000}, + {0x00016040, 0x7f80fff8}, + {0x0001604c, 0x000f0278}, + {0x00016050, 0x4db6db8c}, + {0x00016054, 0x6db60000}, + {0x00016080, 0x00080000}, + {0x00016084, 0x0e48048c}, + {0x00016088, 0x14214514}, + {0x0001608c, 0x119f081e}, + {0x00016090, 0x24926490}, + {0x00016098, 0xd28b3330}, + {0x000160a0, 0xc2108ffe}, + {0x000160a4, 0x812fc370}, + {0x000160a8, 0x423c8000}, + {0x000160b4, 0x92480040}, + {0x000160c0, 0x006db6db}, + {0x000160c4, 0x0186db60}, + {0x000160c8, 0x6db6db6c}, + {0x000160cc, 0x6de6fbe0}, + {0x000160d0, 0xf7dfcf3c}, + {0x00016100, 0x04cb0001}, + {0x00016104, 0xfff80015}, + {0x00016108, 0x00080010}, + {0x00016144, 0x01884080}, + {0x00016148, 0x00008040}, + {0x00016240, 0x08400000}, + {0x00016244, 0x1bf90f00}, + {0x00016248, 0x00000000}, + {0x0001624c, 0x00000000}, + {0x00016280, 0x01000015}, + {0x00016284, 0x00d30000}, + {0x00016288, 0x00318000}, + {0x0001628c, 0x50000000}, + {0x00016290, 0x4b96210f}, + {0x00016380, 0x00000000}, + {0x00016384, 0x00000000}, + {0x00016388, 0x00800700}, + {0x0001638c, 0x00800700}, + {0x00016390, 0x00800700}, + {0x00016394, 0x00000000}, + {0x00016398, 0x00000000}, + {0x0001639c, 0x00000000}, + {0x000163a0, 0x00000001}, + {0x000163a4, 0x00000001}, + {0x000163a8, 0x00000000}, + {0x000163ac, 0x00000000}, + {0x000163b0, 0x00000000}, + {0x000163b4, 0x00000000}, + {0x000163b8, 0x00000000}, + {0x000163bc, 0x00000000}, + {0x000163c0, 0x000000a0}, + {0x000163c4, 0x000c0000}, + {0x000163c8, 0x14021402}, + {0x000163cc, 0x00001402}, + {0x000163d0, 0x00000000}, + {0x000163d4, 0x00000000}, + {0x00016c40, 0x13188278}, + {0x00016c44, 0x12000000}, +}; + +static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x10052e5e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + +static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, + {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, + {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, + {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004}, + {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200}, + {0x0000a510, 0x17022e00, 0x17022e00, 
0x11000202, 0x11000202}, + {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400}, + {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402}, + {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404}, + {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603}, + {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605}, + {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03}, + {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04}, + {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20}, + {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21}, + {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, + {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, + {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, + {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, + {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, + {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, + {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, + {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, + {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, + {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + 
{0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, + {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, +}; + +static const u32 ar9485_1_1[][2] = { + /* Addr allmodes */ + {0x0000a580, 0x00000000}, + {0x0000a584, 0x00000000}, + {0x0000a588, 0x00000000}, + {0x0000a58c, 0x00000000}, + {0x0000a590, 0x00000000}, + {0x0000a594, 0x00000000}, + {0x0000a598, 0x00000000}, + {0x0000a59c, 0x00000000}, + {0x0000a5a0, 0x00000000}, + {0x0000a5a4, 0x00000000}, + {0x0000a5a8, 0x00000000}, + {0x0000a5ac, 0x00000000}, + {0x0000a5b0, 0x00000000}, + {0x0000a5b4, 0x00000000}, + {0x0000a5b8, 0x00000000}, + {0x0000a5bc, 0x00000000}, +}; + +static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, + {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000}, + {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006}, + {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201}, + {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203}, + {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401}, + {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403}, + {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405}, + {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604}, + {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605}, + {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04}, + {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06}, + {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24}, + {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21}, + {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20}, + {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20}, + {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, + {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, + {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, + {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, + {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, + {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, + {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, + {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, + {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, + {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b510, 0x0000001a, 0x0000001a, 
0x0000001a, 0x0000001a}, + {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a}, + {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a}, + {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a}, + {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a}, + {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b}, + {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, + {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, +}; + +static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x10013e5e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + +static const u32 ar9485_1_1_soc_preamble[][2] = { + /* Addr allmodes */ + {0x00004014, 0xba280400}, + {0x00004090, 0x00aa10aa}, + {0x000040a4, 0x00a0c9c9}, + {0x00007010, 0x00000022}, + {0x00007020, 0x00000000}, + {0x00007034, 0x00000002}, + {0x00007038, 0x000004c2}, + {0x00007048, 0x00000002}, +}; + +static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = { + /* Addr allmodes */ + {0x0000a398, 0x00000000}, + {0x0000a39c, 0x6f7f0301}, + {0x0000a3a0, 0xca9228ee}, +}; + +static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = { + /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ + {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8}, + {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000}, + {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002}, + {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004}, + {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200}, + {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202}, + {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400}, + {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402}, + {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404}, + {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603}, + {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605}, + {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 
0x2a000a03}, + {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04}, + {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20}, + {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21}, + {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62}, + {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63}, + {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65}, + {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66}, + {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645}, + {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865}, + {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86}, + {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9}, + {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001eeb, 0x5a001eeb}, + {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb}, + {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb}, + {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, + {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db}, + {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260}, +}; + +static const 
u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = { + /* Addr 5G_HT2 5G_HT40 */ + {0x00009e00, 0x03721821, 0x03721821}, + {0x0000a230, 0x0000400b, 0x00004016}, + {0x0000a254, 0x00000898, 0x00001130}, +}; + +static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x10012e5e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + +static const u32 ar9485_common_rx_gain_1_1[][2] = { + /* Addr allmodes */ + {0x0000a000, 0x00010000}, + {0x0000a004, 0x00030002}, + {0x0000a008, 0x00050004}, + {0x0000a00c, 0x00810080}, + {0x0000a010, 0x01800082}, + {0x0000a014, 0x01820181}, + {0x0000a018, 0x01840183}, + {0x0000a01c, 0x01880185}, + {0x0000a020, 0x018a0189}, + {0x0000a024, 0x02850284}, + {0x0000a028, 0x02890288}, + {0x0000a02c, 0x03850384}, + {0x0000a030, 0x03890388}, + {0x0000a034, 0x038b038a}, + {0x0000a038, 0x038d038c}, + {0x0000a03c, 0x03910390}, + {0x0000a040, 0x03930392}, + {0x0000a044, 0x03950394}, + {0x0000a048, 0x00000396}, + {0x0000a04c, 0x00000000}, + {0x0000a050, 0x00000000}, + {0x0000a054, 0x00000000}, + {0x0000a058, 0x00000000}, + {0x0000a05c, 0x00000000}, + {0x0000a060, 0x00000000}, + {0x0000a064, 0x00000000}, + {0x0000a068, 0x00000000}, + {0x0000a06c, 0x00000000}, + {0x0000a070, 0x00000000}, + {0x0000a074, 0x00000000}, + {0x0000a078, 0x00000000}, + {0x0000a07c, 0x00000000}, + {0x0000a080, 0x28282828}, + {0x0000a084, 0x28282828}, + {0x0000a088, 0x28282828}, + {0x0000a08c, 0x28282828}, + {0x0000a090, 0x28282828}, + {0x0000a094, 0x21212128}, + {0x0000a098, 0x171c1c1c}, + {0x0000a09c, 0x02020212}, + {0x0000a0a0, 0x00000202}, + {0x0000a0a4, 0x00000000}, + {0x0000a0a8, 0x00000000}, + {0x0000a0ac, 0x00000000}, + {0x0000a0b0, 0x00000000}, + {0x0000a0b4, 0x00000000}, + {0x0000a0b8, 0x00000000}, + {0x0000a0bc, 0x00000000}, + {0x0000a0c0, 0x001f0000}, + {0x0000a0c4, 0x111f1100}, + {0x0000a0c8, 0x111d111e}, + {0x0000a0cc, 0x111b111c}, + {0x0000a0d0, 0x22032204}, + {0x0000a0d4, 0x22012202}, + {0x0000a0d8, 0x221f2200}, + {0x0000a0dc, 0x221d221e}, + {0x0000a0e0, 0x33013302}, + {0x0000a0e4, 0x331f3300}, + {0x0000a0e8, 0x4402331e}, + {0x0000a0ec, 0x44004401}, + {0x0000a0f0, 0x441e441f}, + {0x0000a0f4, 0x55015502}, + {0x0000a0f8, 0x551f5500}, + {0x0000a0fc, 0x6602551e}, + {0x0000a100, 0x66006601}, + {0x0000a104, 0x661e661f}, + {0x0000a108, 0x7703661d}, + {0x0000a10c, 0x77017702}, + {0x0000a110, 0x00007700}, + {0x0000a114, 0x00000000}, + {0x0000a118, 0x00000000}, + {0x0000a11c, 0x00000000}, + {0x0000a120, 0x00000000}, + {0x0000a124, 0x00000000}, + {0x0000a128, 0x00000000}, + {0x0000a12c, 0x00000000}, + {0x0000a130, 0x00000000}, + {0x0000a134, 0x00000000}, + {0x0000a138, 0x00000000}, + {0x0000a13c, 0x00000000}, + {0x0000a140, 0x001f0000}, + {0x0000a144, 0x111f1100}, + {0x0000a148, 0x111d111e}, + {0x0000a14c, 0x111b111c}, + {0x0000a150, 0x22032204}, + {0x0000a154, 0x22012202}, + {0x0000a158, 0x221f2200}, + {0x0000a15c, 0x221d221e}, + {0x0000a160, 0x33013302}, + {0x0000a164, 0x331f3300}, + {0x0000a168, 0x4402331e}, + {0x0000a16c, 0x44004401}, + {0x0000a170, 0x441e441f}, + {0x0000a174, 0x55015502}, + {0x0000a178, 0x551f5500}, + {0x0000a17c, 0x6602551e}, + {0x0000a180, 0x66006601}, + {0x0000a184, 0x661e661f}, + {0x0000a188, 0x7703661d}, + {0x0000a18c, 0x77017702}, + {0x0000a190, 0x00007700}, + {0x0000a194, 0x00000000}, + {0x0000a198, 0x00000000}, + {0x0000a19c, 0x00000000}, + {0x0000a1a0, 0x00000000}, + {0x0000a1a4, 0x00000000}, + {0x0000a1a8, 0x00000000}, + {0x0000a1ac, 0x00000000}, + {0x0000a1b0, 0x00000000}, + {0x0000a1b4, 0x00000000}, + 
{0x0000a1b8, 0x00000000}, + {0x0000a1bc, 0x00000000}, + {0x0000a1c0, 0x00000000}, + {0x0000a1c4, 0x00000000}, + {0x0000a1c8, 0x00000000}, + {0x0000a1cc, 0x00000000}, + {0x0000a1d0, 0x00000000}, + {0x0000a1d4, 0x00000000}, + {0x0000a1d8, 0x00000000}, + {0x0000a1dc, 0x00000000}, + {0x0000a1e0, 0x00000000}, + {0x0000a1e4, 0x00000000}, + {0x0000a1e8, 0x00000000}, + {0x0000a1ec, 0x00000000}, + {0x0000a1f0, 0x00000396}, + {0x0000a1f4, 0x00000396}, + {0x0000a1f8, 0x00000396}, + {0x0000a1fc, 0x00000296}, +}; + +static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = { + /* Addr allmodes */ + {0x00018c00, 0x10053e5e}, + {0x00018c04, 0x000801d8}, + {0x00018c08, 0x0000080c}, +}; + +static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = { + /* Addr allmodes */ + {0x0000a000, 0x00060005}, + {0x0000a004, 0x00810080}, + {0x0000a008, 0x00830082}, + {0x0000a00c, 0x00850084}, + {0x0000a010, 0x01820181}, + {0x0000a014, 0x01840183}, + {0x0000a018, 0x01880185}, + {0x0000a01c, 0x018a0189}, + {0x0000a020, 0x02850284}, + {0x0000a024, 0x02890288}, + {0x0000a028, 0x028b028a}, + {0x0000a02c, 0x03850384}, + {0x0000a030, 0x03890388}, + {0x0000a034, 0x038b038a}, + {0x0000a038, 0x038d038c}, + {0x0000a03c, 0x03910390}, + {0x0000a040, 0x03930392}, + {0x0000a044, 0x03950394}, + {0x0000a048, 0x00000396}, + {0x0000a04c, 0x00000000}, + {0x0000a050, 0x00000000}, + {0x0000a054, 0x00000000}, + {0x0000a058, 0x00000000}, + {0x0000a05c, 0x00000000}, + {0x0000a060, 0x00000000}, + {0x0000a064, 0x00000000}, + {0x0000a068, 0x00000000}, + {0x0000a06c, 0x00000000}, + {0x0000a070, 0x00000000}, + {0x0000a074, 0x00000000}, + {0x0000a078, 0x00000000}, + {0x0000a07c, 0x00000000}, + {0x0000a080, 0x28282828}, + {0x0000a084, 0x28282828}, + {0x0000a088, 0x28282828}, + {0x0000a08c, 0x28282828}, + {0x0000a090, 0x28282828}, + {0x0000a094, 0x24242428}, + {0x0000a098, 0x171e1e1e}, + {0x0000a09c, 0x02020b0b}, + {0x0000a0a0, 0x02020202}, + {0x0000a0a4, 0x00000000}, + {0x0000a0a8, 0x00000000}, + {0x0000a0ac, 0x00000000}, + {0x0000a0b0, 0x00000000}, + {0x0000a0b4, 0x00000000}, + {0x0000a0b8, 0x00000000}, + {0x0000a0bc, 0x00000000}, + {0x0000a0c0, 0x22072208}, + {0x0000a0c4, 0x22052206}, + {0x0000a0c8, 0x22032204}, + {0x0000a0cc, 0x22012202}, + {0x0000a0d0, 0x221f2200}, + {0x0000a0d4, 0x221d221e}, + {0x0000a0d8, 0x33023303}, + {0x0000a0dc, 0x33003301}, + {0x0000a0e0, 0x331e331f}, + {0x0000a0e4, 0x4402331d}, + {0x0000a0e8, 0x44004401}, + {0x0000a0ec, 0x441e441f}, + {0x0000a0f0, 0x55025503}, + {0x0000a0f4, 0x55005501}, + {0x0000a0f8, 0x551e551f}, + {0x0000a0fc, 0x6602551d}, + {0x0000a100, 0x66006601}, + {0x0000a104, 0x661e661f}, + {0x0000a108, 0x7703661d}, + {0x0000a10c, 0x77017702}, + {0x0000a110, 0x00007700}, + {0x0000a114, 0x00000000}, + {0x0000a118, 0x00000000}, + {0x0000a11c, 0x00000000}, + {0x0000a120, 0x00000000}, + {0x0000a124, 0x00000000}, + {0x0000a128, 0x00000000}, + {0x0000a12c, 0x00000000}, + {0x0000a130, 0x00000000}, + {0x0000a134, 0x00000000}, + {0x0000a138, 0x00000000}, + {0x0000a13c, 0x00000000}, + {0x0000a140, 0x001f0000}, + {0x0000a144, 0x111f1100}, + {0x0000a148, 0x111d111e}, + {0x0000a14c, 0x111b111c}, + {0x0000a150, 0x22032204}, + {0x0000a154, 0x22012202}, + {0x0000a158, 0x221f2200}, + {0x0000a15c, 0x221d221e}, + {0x0000a160, 0x33013302}, + {0x0000a164, 0x331f3300}, + {0x0000a168, 0x4402331e}, + {0x0000a16c, 0x44004401}, + {0x0000a170, 0x441e441f}, + {0x0000a174, 0x55015502}, + {0x0000a178, 0x551f5500}, + {0x0000a17c, 0x6602551e}, + {0x0000a180, 0x66006601}, + {0x0000a184, 0x661e661f}, + {0x0000a188, 0x7703661d}, + {0x0000a18c, 
0x77017702}, + {0x0000a190, 0x00007700}, + {0x0000a194, 0x00000000}, + {0x0000a198, 0x00000000}, + {0x0000a19c, 0x00000000}, + {0x0000a1a0, 0x00000000}, + {0x0000a1a4, 0x00000000}, + {0x0000a1a8, 0x00000000}, + {0x0000a1ac, 0x00000000}, + {0x0000a1b0, 0x00000000}, + {0x0000a1b4, 0x00000000}, + {0x0000a1b8, 0x00000000}, + {0x0000a1bc, 0x00000000}, + {0x0000a1c0, 0x00000000}, + {0x0000a1c4, 0x00000000}, + {0x0000a1c8, 0x00000000}, + {0x0000a1cc, 0x00000000}, + {0x0000a1d0, 0x00000000}, + {0x0000a1d4, 0x00000000}, + {0x0000a1d8, 0x00000000}, + {0x0000a1dc, 0x00000000}, + {0x0000a1e0, 0x00000000}, + {0x0000a1e4, 0x00000000}, + {0x0000a1e8, 0x00000000}, + {0x0000a1ec, 0x00000000}, + {0x0000a1f0, 0x00000396}, + {0x0000a1f4, 0x00000396}, + {0x0000a1f8, 0x00000396}, + {0x0000a1fc, 0x00000296}, +}; + #endif diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 1a7fa6ea4cf5..099bd4183ad0 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -92,9 +92,9 @@ struct ath_config { * @BUF_XRETRY: To denote excessive retries of the buffer */ enum buffer_type { - BUF_AMPDU = BIT(2), - BUF_AGGR = BIT(3), - BUF_XRETRY = BIT(5), + BUF_AMPDU = BIT(0), + BUF_AGGR = BIT(1), + BUF_XRETRY = BIT(2), }; #define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) @@ -134,7 +134,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ WME_AC_VO) -#define ADDBA_EXCHANGE_ATTEMPTS 10 #define ATH_AGGR_DELIM_SZ 4 #define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ /* number of delimiters for encryption padding */ @@ -181,7 +180,8 @@ enum ATH_AGGR_STATUS { #define ATH_TXFIFO_DEPTH 8 struct ath_txq { - u32 axq_qnum; + int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */ + u32 axq_qnum; /* ath9k hardware queue number */ u32 *axq_link; struct list_head axq_q; spinlock_t axq_lock; @@ -231,7 +231,6 @@ struct ath_buf { bool bf_stale; u16 bf_flags; struct ath_buf_state bf_state; - struct ath_wiphy *aphy; }; struct ath_atx_tid { @@ -252,7 +251,10 @@ struct ath_atx_tid { }; struct ath_node { - struct ath_common *common; +#ifdef CONFIG_ATH9K_DEBUGFS + struct list_head list; /* for sc->nodes */ + struct ieee80211_sta *sta; /* station struct we're part of */ +#endif struct ath_atx_tid tid[WME_NUM_TID]; struct ath_atx_ac ac[WME_NUM_AC]; u16 maxampdu; @@ -275,6 +277,11 @@ struct ath_tx_control { #define ATH_TX_XRETRY 0x02 #define ATH_TX_BAR 0x04 +/** + * @txq_map: Index is mac80211 queue number. This is + * not necessarily the same as the hardware queue number + * (axq_qnum). 
+ */ struct ath_tx { u16 seq_no; u32 txqsetup; @@ -301,6 +308,8 @@ struct ath_rx { struct ath_descdma rxdma; struct ath_buf *rx_bufptr; struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX]; + + struct sk_buff *frag; }; int ath_startrecv(struct ath_softc *sc); @@ -337,10 +346,10 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid struct ath_vif { int av_bslot; + bool is_bslot_active; __le64 tsf_adjust; /* TSF adjustment for staggered beacons */ enum nl80211_iftype av_opmode; struct ath_buf *av_bcbuf; - struct ath_tx_control av_btxctl; u8 bssid[ETH_ALEN]; /* current BSSID from config_interface */ }; @@ -360,7 +369,7 @@ struct ath_vif { #define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) struct ath_beacon_config { - u16 beacon_interval; + int beacon_interval; u16 listen_interval; u16 dtim_period; u16 bmiss_timeout; @@ -379,7 +388,6 @@ struct ath_beacon { u32 ast_be_xmit; u64 bc_tstamp; struct ieee80211_vif *bslot[ATH_BCBUF]; - struct ath_wiphy *bslot_aphy[ATH_BCBUF]; int slottime; int slotupdate; struct ath9k_tx_queue_info beacon_qi; @@ -390,9 +398,10 @@ struct ath_beacon { void ath_beacon_tasklet(unsigned long data); void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif); -int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif); +int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif); void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp); int ath_beaconq_config(struct ath_softc *sc); +void ath9k_set_beaconing_status(struct ath_softc *sc, bool status); /*******/ /* ANI */ @@ -439,26 +448,21 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc); #define ATH_LED_PIN_DEF 1 #define ATH_LED_PIN_9287 8 -#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */ -#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */ - -enum ath_led_type { - ATH_LED_RADIO, - ATH_LED_ASSOC, - ATH_LED_TX, - ATH_LED_RX -}; - -struct ath_led { - struct ath_softc *sc; - struct led_classdev led_cdev; - enum ath_led_type led_type; - char name[32]; - bool registered; -}; +#define ATH_LED_PIN_9485 6 +#ifdef CONFIG_MAC80211_LEDS void ath_init_leds(struct ath_softc *sc); void ath_deinit_leds(struct ath_softc *sc); +#else +static inline void ath_init_leds(struct ath_softc *sc) +{ +} + +static inline void ath_deinit_leds(struct ath_softc *sc) +{ +} +#endif + /* Antenna diversity/combining */ #define ATH_ANT_RX_CURRENT_SHIFT 4 @@ -527,7 +531,6 @@ struct ath_ant_comb { #define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ #define ATH_MAX_SW_RETRIES 10 #define ATH_CHAN_MAX 255 -#define IEEE80211_WEP_NKID 4 /* number of key ids */ #define ATH_TXPOWER_MAX 100 /* .5 dBm units */ #define ATH_RATE_DUMMY_MARKER 0 @@ -555,27 +558,28 @@ struct ath_ant_comb { #define PS_WAIT_FOR_TX_ACK BIT(3) #define PS_BEACON_SYNC BIT(4) -struct ath_wiphy; struct ath_rate_table; +struct ath9k_vif_iter_data { + const u8 *hw_macaddr; /* phy's hardware address, set + * before starting iteration for + * valid bssid mask. + */ + u8 mask[ETH_ALEN]; /* bssid mask */ + int naps; /* number of AP vifs */ + int nmeshes; /* number of mesh vifs */ + int nstations; /* number of station vifs */ + int nwds; /* number of nwd vifs */ + int nadhocs; /* number of adhoc vifs */ + int nothers; /* number of vifs not specified above. 
*/ +}; + struct ath_softc { struct ieee80211_hw *hw; struct device *dev; - spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */ - struct ath_wiphy *pri_wiphy; - struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may - * have NULL entries */ - int num_sec_wiphy; /* number of sec_wiphy pointers in the array */ int chan_idx; int chan_is_ht; - struct ath_wiphy *next_wiphy; - struct work_struct chan_work; - int wiphy_select_failures; - unsigned long wiphy_select_first_fail; - struct delayed_work wiphy_work; - unsigned long wiphy_scheduler_int; - int wiphy_scheduler_index; struct survey_info *cur_survey; struct survey_info survey[ATH9K_NUM_CHANNELS]; @@ -592,14 +596,16 @@ struct ath_softc { struct work_struct hw_check_work; struct completion paprd_complete; + unsigned int hw_busy_count; + u32 intrstatus; u32 sc_flags; /* SC_OP_* */ u16 ps_flags; /* PS_* */ u16 curtxpow; - u8 nbcnvifs; - u16 nvifs; bool ps_enabled; bool ps_idle; + short nbcnvifs; + short nvifs; unsigned long ps_usecount; struct ath_config config; @@ -608,23 +614,24 @@ struct ath_softc { struct ath_beacon beacon; struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; - struct ath_led radio_led; - struct ath_led assoc_led; - struct ath_led tx_led; - struct ath_led rx_led; - struct delayed_work ath_led_blink_work; - int led_on_duration; - int led_off_duration; - int led_on_cnt; - int led_off_cnt; +#ifdef CONFIG_MAC80211_LEDS + bool led_registered; + char led_name[32]; + struct led_classdev led_cdev; +#endif - int beacon_interval; + struct ath9k_hw_cal_data caldata; + int last_rssi; #ifdef CONFIG_ATH9K_DEBUGFS struct ath9k_debug debug; + spinlock_t nodes_lock; + struct list_head nodes; /* basically, stations */ + unsigned int tx_complete_poll_work_seen; #endif struct ath_beacon_config cur_beacon_conf; struct delayed_work tx_complete_work; + struct delayed_work hw_pll_work; struct ath_btcoex btcoex; struct ath_descdma txsdma; @@ -632,23 +639,6 @@ struct ath_softc { struct ath_ant_comb ant_comb; }; -struct ath_wiphy { - struct ath_softc *sc; /* shared for all virtual wiphys */ - struct ieee80211_hw *hw; - struct ath9k_hw_cal_data caldata; - enum ath_wiphy_state { - ATH_WIPHY_INACTIVE, - ATH_WIPHY_ACTIVE, - ATH_WIPHY_PAUSING, - ATH_WIPHY_PAUSED, - ATH_WIPHY_SCAN, - } state; - bool idle; - int chan_idx; - int chan_is_ht; - int last_rssi; -}; - void ath9k_tasklet(unsigned long data); int ath_reset(struct ath_softc *sc, bool retry_tx); int ath_cabq_update(struct ath_softc *); @@ -669,14 +659,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, const struct ath_bus_ops *bus_ops); void ath9k_deinit_device(struct ath_softc *sc); void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); -void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, - struct ath9k_channel *ichan); int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, struct ath9k_channel *hchan); void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw); void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw); bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode); +bool ath9k_uses_beacons(int type); #ifdef CONFIG_PCI int ath_pci_init(void); @@ -700,26 +689,12 @@ void ath9k_ps_restore(struct ath_softc *sc); u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate); void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif); -int ath9k_wiphy_add(struct ath_softc *sc); -int ath9k_wiphy_del(struct ath_wiphy 
*aphy); -void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype); -int ath9k_wiphy_pause(struct ath_wiphy *aphy); -int ath9k_wiphy_unpause(struct ath_wiphy *aphy); -int ath9k_wiphy_select(struct ath_wiphy *aphy); -void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int); -void ath9k_wiphy_chan_work(struct work_struct *work); -bool ath9k_wiphy_started(struct ath_softc *sc); -void ath9k_wiphy_pause_all_forced(struct ath_softc *sc, - struct ath_wiphy *selected); -bool ath9k_wiphy_scanning(struct ath_softc *sc); -void ath9k_wiphy_work(struct work_struct *work); -bool ath9k_all_wiphys_idle(struct ath_softc *sc); -void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle); - -void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue); -bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue); void ath_start_rfkill_poll(struct ath_softc *sc); extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw); +void ath9k_calculate_iter_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ath9k_vif_iter_data *iter_data); + #endif /* ATH9K_H */ diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 385ba03134ba..6d2a545fc35e 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -112,8 +112,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp, static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_tx_control txctl; @@ -132,8 +131,7 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_buf *bf; struct ath_vif *avp; @@ -142,13 +140,10 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, struct ieee80211_tx_info *info; int cabq_depth; - if (aphy->state != ATH_WIPHY_ACTIVE) - return NULL; - avp = (void *)vif->drv_priv; cabq = sc->beacon.cabq; - if (avp->av_bcbuf == NULL) + if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active) return NULL; /* Release the old beacon first */ @@ -225,13 +220,13 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, return bf; } -int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) +int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif) { - struct ath_softc *sc = aphy->sc; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp; struct ath_buf *bf; struct sk_buff *skb; + struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; __le64 tstamp; avp = (void *)vif->drv_priv; @@ -244,9 +239,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) struct ath_buf, list); list_del(&avp->av_bcbuf->list); - if (sc->sc_ah->opmode == NL80211_IFTYPE_AP || - sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC || - sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT) { + if (ath9k_uses_beacons(vif->type)) { int slot; /* * Assign the vif to a beacon xmit slot. 
As @@ -256,6 +249,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) for (slot = 0; slot < ATH_BCBUF; slot++) if (sc->beacon.bslot[slot] == NULL) { avp->av_bslot = slot; + avp->is_bslot_active = false; /* NB: keep looking for a double slot */ if (slot == 0 || !sc->beacon.bslot[slot-1]) @@ -263,7 +257,6 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) } BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL); sc->beacon.bslot[avp->av_bslot] = vif; - sc->beacon.bslot_aphy[avp->av_bslot] = aphy; sc->nbcnvifs++; } } @@ -281,10 +274,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) /* NB: the beacon data buffer must be 32-bit aligned. */ skb = ieee80211_beacon_get(sc->hw, vif); - if (skb == NULL) { - ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n"); + if (skb == NULL) return -ENOMEM; - } tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; sc->beacon.bc_tstamp = le64_to_cpu(tstamp); @@ -293,7 +284,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) u64 tsfadjust; int intval; - intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL; + intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL; /* * Calculate the TSF offset for this beacon slot, i.e., the @@ -325,6 +316,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) ath_err(common, "dma_mapping_error on beacon alloc\n"); return -ENOMEM; } + avp->is_bslot_active = true; return 0; } @@ -336,7 +328,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp) if (avp->av_bslot != -1) { sc->beacon.bslot[avp->av_bslot] = NULL; - sc->beacon.bslot_aphy[avp->av_bslot] = NULL; sc->nbcnvifs--; } @@ -358,11 +349,11 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp) void ath_beacon_tasklet(unsigned long data) { struct ath_softc *sc = (struct ath_softc *)data; + struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_buf *bf = NULL; struct ieee80211_vif *vif; - struct ath_wiphy *aphy; int slot; u32 bfaddr, bc = 0, tsftu; u64 tsf; @@ -382,6 +373,7 @@ void ath_beacon_tasklet(unsigned long data) ath_dbg(common, ATH_DBG_BSTUCK, "missed %u consecutive beacons\n", sc->beacon.bmisscnt); + ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq); ath9k_hw_bstuck_nfcal(ah); } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { ath_dbg(common, ATH_DBG_BSTUCK, @@ -406,7 +398,7 @@ void ath_beacon_tasklet(unsigned long data) * on the tsf to safeguard against missing an swba. */ - intval = sc->beacon_interval ? : ATH_DEFAULT_BINTVAL; + intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL; tsf = ath9k_hw_gettsf64(ah); tsftu = TSF_TO_TU(tsf>>32, tsf); @@ -420,7 +412,6 @@ void ath_beacon_tasklet(unsigned long data) */ slot = ATH_BCBUF - slot - 1; vif = sc->beacon.bslot[slot]; - aphy = sc->beacon.bslot_aphy[slot]; ath_dbg(common, ATH_DBG_BEACON, "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", @@ -428,7 +419,7 @@ void ath_beacon_tasklet(unsigned long data) bfaddr = 0; if (vif) { - bf = ath_beacon_generate(aphy->hw, vif); + bf = ath_beacon_generate(sc->hw, vif); if (bf != NULL) { bfaddr = bf->bf_daddr; bc = 1; @@ -460,16 +451,6 @@ void ath_beacon_tasklet(unsigned long data) sc->beacon.updateslot = OK; } if (bfaddr != 0) { - /* - * Stop any current dma and put the new frame(s) on the queue. - * This should never fail since we check above that no frames - * are still pending on the queue. 
- */ - if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) { - ath_err(common, "beacon queue %u did not stop?\n", - sc->beacon.beaconq); - } - /* NB: cabq traffic should already be queued and primed */ ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr); ath9k_hw_txstart(ah, sc->beacon.beaconq); @@ -720,10 +701,10 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) iftype = sc->sc_ah->opmode; } - cur_conf->listen_interval = 1; - cur_conf->dtim_count = 1; - cur_conf->bmiss_timeout = - ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; + cur_conf->listen_interval = 1; + cur_conf->dtim_count = 1; + cur_conf->bmiss_timeout = + ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; /* * It looks like mac80211 may end up using beacon interval of zero in @@ -735,8 +716,9 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) cur_conf->beacon_interval = 100; /* - * Some times we dont parse dtim period from mac80211, in that case - * use a default value + * We don't parse dtim period from mac80211 during the driver + * initialization as it breaks association with hidden-ssid + * AP and it causes latency in roaming */ if (cur_conf->dtim_period == 0) cur_conf->dtim_period = 1; @@ -760,3 +742,36 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) sc->sc_flags |= SC_OP_BEACONS; } + +void ath9k_set_beaconing_status(struct ath_softc *sc, bool status) +{ + struct ath_hw *ah = sc->sc_ah; + struct ath_vif *avp; + int slot; + bool found = false; + + ath9k_ps_wakeup(sc); + if (status) { + for (slot = 0; slot < ATH_BCBUF; slot++) { + if (sc->beacon.bslot[slot]) { + avp = (void *)sc->beacon.bslot[slot]->drv_priv; + if (avp->is_bslot_active) { + found = true; + break; + } + } + } + if (found) { + /* Re-enable beaconing */ + ah->imask |= ATH9K_INT_SWBA; + ath9k_hw_set_interrupts(ah, ah->imask); + } + } else { + /* Disable SWBA interrupt */ + ah->imask &= ~ATH9K_INT_SWBA; + ath9k_hw_set_interrupts(ah, ah->imask); + tasklet_kill(&sc->bcon_tasklet); + ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq); + } + ath9k_ps_restore(sc); +} diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index b68a1acbddd0..8649581fa4dd 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -262,7 +262,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) * since 250us often results in NF load timeout and causes deaf * condition during stress testing 12/12/2009 */ - for (j = 0; j < 1000; j++) { + for (j = 0; j < 10000; j++) { if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) == 0) break; @@ -278,7 +278,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) * here, the baseband nf cal will just be capped by our present * noisefloor until the next calibration timer. 
*/ - if (j == 1000) { + if (j == 10000) { ath_dbg(common, ATH_DBG_ANY, "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", REG_READ(ah, AR_PHY_AGC_CONTROL)); @@ -382,9 +382,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah, s16 default_nf; int i, j; - if (!ah->caldata) - return; - + ah->caldata->channel = chan->channel; + ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT; h = ah->caldata->nfCalHist; default_nf = ath9k_hw_get_default_nf(ah, chan); for (i = 0; i < NUM_NF_READINGS; i++) { diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index df1998d48253..615e68276e72 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -189,6 +189,17 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common, } EXPORT_SYMBOL(ath9k_cmn_btcoex_bt_stomp); +void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow, + u16 new_txpow, u16 *txpower) +{ + if (cur_txpow != new_txpow) { + ath9k_hw_set_txpowerlimit(ah, new_txpow, false); + /* read back in case value is clamped */ + *txpower = ath9k_hw_regulatory(ah)->power_limit; + } +} +EXPORT_SYMBOL(ath9k_cmn_update_txpow); + static int __init ath9k_cmn_init(void) { return 0; diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index a126bddebb0a..b2f7b5f89097 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -23,8 +23,6 @@ /* Common header for Atheros 802.11n base driver cores */ -#define IEEE80211_WEP_NKID 4 - #define WME_NUM_TID 16 #define WME_BA_BMP_SIZE 64 #define WME_MAX_BA WME_BA_BMP_SIZE @@ -70,3 +68,5 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, int ath9k_cmn_count_streams(unsigned int chainmask, int max); void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common, enum ath_stomp_type stomp_type); +void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow, + u16 new_txpow, u16 *txpower); diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 3586c43077a7..8df5a92a20f1 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -15,6 +15,7 @@ */ #include <linux/slab.h> +#include <linux/vmalloc.h> #include <asm/unaligned.h> #include "ath9k.h" @@ -30,6 +31,19 @@ static int ath9k_debugfs_open(struct inode *inode, struct file *file) return 0; } +static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + u8 *buf = file->private_data; + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); +} + +static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file) +{ + vfree(file->private_data); + return 0; +} + #ifdef CONFIG_ATH_DEBUG static ssize_t read_file_debug(struct file *file, char __user *user_buf, @@ -381,41 +395,40 @@ static const struct file_operations fops_interrupt = { .llseek = default_llseek, }; -static const char * ath_wiphy_state_str(enum ath_wiphy_state state) +static const char *channel_type_str(enum nl80211_channel_type t) { - switch (state) { - case ATH_WIPHY_INACTIVE: - return "INACTIVE"; - case ATH_WIPHY_ACTIVE: - return "ACTIVE"; - case ATH_WIPHY_PAUSING: - return "PAUSING"; - case ATH_WIPHY_PAUSED: - return "PAUSED"; - case ATH_WIPHY_SCAN: - return "SCAN"; + switch (t) { + case NL80211_CHAN_NO_HT: + return "no ht"; + case NL80211_CHAN_HT20: + return "ht20"; + case NL80211_CHAN_HT40MINUS: + return "ht40-"; + case 
NL80211_CHAN_HT40PLUS: + return "ht40+"; + default: + return "???"; } - return "?"; } static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; - struct ath_wiphy *aphy = sc->pri_wiphy; - struct ieee80211_channel *chan = aphy->hw->conf.channel; + struct ieee80211_channel *chan = sc->hw->conf.channel; + struct ieee80211_conf *conf = &(sc->hw->conf); char buf[512]; unsigned int len = 0; - int i; u8 addr[ETH_ALEN]; u32 tmp; len += snprintf(buf + len, sizeof(buf) - len, - "primary: %s (%s chan=%d ht=%d)\n", - wiphy_name(sc->pri_wiphy->hw->wiphy), - ath_wiphy_state_str(sc->pri_wiphy->state), + "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n", + wiphy_name(sc->hw->wiphy), ieee80211_frequency_to_channel(chan->center_freq), - aphy->chan_is_ht); + chan->center_freq, + conf->channel_type, + channel_type_str(conf->channel_type)); put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr); put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); @@ -457,156 +470,82 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, else len += snprintf(buf + len, sizeof(buf) - len, "\n"); - /* Put variable-length stuff down here, and check for overflows. */ - for (i = 0; i < sc->num_sec_wiphy; i++) { - struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i]; - if (aphy_tmp == NULL) - continue; - chan = aphy_tmp->hw->conf.channel; - len += snprintf(buf + len, sizeof(buf) - len, - "secondary: %s (%s chan=%d ht=%d)\n", - wiphy_name(aphy_tmp->hw->wiphy), - ath_wiphy_state_str(aphy_tmp->state), - ieee80211_frequency_to_channel(chan->center_freq), - aphy_tmp->chan_is_ht); - } if (len > sizeof(buf)) len = sizeof(buf); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } -static struct ath_wiphy * get_wiphy(struct ath_softc *sc, const char *name) -{ - int i; - if (strcmp(name, wiphy_name(sc->pri_wiphy->hw->wiphy)) == 0) - return sc->pri_wiphy; - for (i = 0; i < sc->num_sec_wiphy; i++) { - struct ath_wiphy *aphy = sc->sec_wiphy[i]; - if (aphy && strcmp(name, wiphy_name(aphy->hw->wiphy)) == 0) - return aphy; - } - return NULL; -} - -static int del_wiphy(struct ath_softc *sc, const char *name) -{ - struct ath_wiphy *aphy = get_wiphy(sc, name); - if (!aphy) - return -ENOENT; - return ath9k_wiphy_del(aphy); -} - -static int pause_wiphy(struct ath_softc *sc, const char *name) -{ - struct ath_wiphy *aphy = get_wiphy(sc, name); - if (!aphy) - return -ENOENT; - return ath9k_wiphy_pause(aphy); -} - -static int unpause_wiphy(struct ath_softc *sc, const char *name) -{ - struct ath_wiphy *aphy = get_wiphy(sc, name); - if (!aphy) - return -ENOENT; - return ath9k_wiphy_unpause(aphy); -} - -static int select_wiphy(struct ath_softc *sc, const char *name) -{ - struct ath_wiphy *aphy = get_wiphy(sc, name); - if (!aphy) - return -ENOENT; - return ath9k_wiphy_select(aphy); -} - -static int schedule_wiphy(struct ath_softc *sc, const char *msec) -{ - ath9k_wiphy_set_scheduler(sc, simple_strtoul(msec, NULL, 0)); - return 0; -} - -static ssize_t write_file_wiphy(struct file *file, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ath_softc *sc = file->private_data; - char buf[50]; - size_t len; - - len = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, len)) - return -EFAULT; - buf[len] = '\0'; - if (len > 0 && buf[len - 1] == '\n') - buf[len - 1] = '\0'; - - if (strncmp(buf, "add", 3) == 0) { - int res = ath9k_wiphy_add(sc); - if (res < 0) - return res; - } else if 
(strncmp(buf, "del=", 4) == 0) { - int res = del_wiphy(sc, buf + 4); - if (res < 0) - return res; - } else if (strncmp(buf, "pause=", 6) == 0) { - int res = pause_wiphy(sc, buf + 6); - if (res < 0) - return res; - } else if (strncmp(buf, "unpause=", 8) == 0) { - int res = unpause_wiphy(sc, buf + 8); - if (res < 0) - return res; - } else if (strncmp(buf, "select=", 7) == 0) { - int res = select_wiphy(sc, buf + 7); - if (res < 0) - return res; - } else if (strncmp(buf, "schedule=", 9) == 0) { - int res = schedule_wiphy(sc, buf + 9); - if (res < 0) - return res; - } else - return -EOPNOTSUPP; - - return count; -} - static const struct file_operations fops_wiphy = { .read = read_file_wiphy, - .write = write_file_wiphy, .open = ath9k_debugfs_open, .owner = THIS_MODULE, .llseek = default_llseek, }; +#define PR_QNUM(_n) sc->tx.txq_map[_n]->axq_qnum #define PR(str, elem) \ do { \ len += snprintf(buf + len, size - len, \ "%s%13u%11u%10u%10u\n", str, \ - sc->debug.stats.txstats[WME_AC_BE].elem, \ - sc->debug.stats.txstats[WME_AC_BK].elem, \ - sc->debug.stats.txstats[WME_AC_VI].elem, \ - sc->debug.stats.txstats[WME_AC_VO].elem); \ + sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem, \ + sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem, \ + sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem, \ + sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem); \ + if (len >= size) \ + goto done; \ +} while(0) + +#define PRX(str, elem) \ +do { \ + len += snprintf(buf + len, size - len, \ + "%s%13u%11u%10u%10u\n", str, \ + (unsigned int)(sc->tx.txq_map[WME_AC_BE]->elem), \ + (unsigned int)(sc->tx.txq_map[WME_AC_BK]->elem), \ + (unsigned int)(sc->tx.txq_map[WME_AC_VI]->elem), \ + (unsigned int)(sc->tx.txq_map[WME_AC_VO]->elem)); \ + if (len >= size) \ + goto done; \ } while(0) +#define PRQLE(str, elem) \ +do { \ + len += snprintf(buf + len, size - len, \ + "%s%13i%11i%10i%10i\n", str, \ + list_empty(&sc->tx.txq_map[WME_AC_BE]->elem), \ + list_empty(&sc->tx.txq_map[WME_AC_BK]->elem), \ + list_empty(&sc->tx.txq_map[WME_AC_VI]->elem), \ + list_empty(&sc->tx.txq_map[WME_AC_VO]->elem)); \ + if (len >= size) \ + goto done; \ +} while (0) + static ssize_t read_file_xmit(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath_softc *sc = file->private_data; char *buf; - unsigned int len = 0, size = 2048; + unsigned int len = 0, size = 8000; + int i; ssize_t retval = 0; + char tmp[32]; buf = kzalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; - len += sprintf(buf, "%30s %10s%10s%10s\n\n", "BE", "BK", "VI", "VO"); + len += sprintf(buf, "Num-Tx-Queues: %i tx-queues-setup: 0x%x" + " poll-work-seen: %u\n" + "%30s %10s%10s%10s\n\n", + ATH9K_NUM_TX_QUEUES, sc->tx.txqsetup, + sc->tx_complete_poll_work_seen, + "BE", "BK", "VI", "VO"); PR("MPDUs Queued: ", queued); PR("MPDUs Completed: ", completed); PR("Aggregates: ", a_aggr); - PR("AMPDUs Queued: ", a_queued); + PR("AMPDUs Queued HW:", a_queued_hw); + PR("AMPDUs Queued SW:", a_queued_sw); PR("AMPDUs Completed:", a_completed); PR("AMPDUs Retried: ", a_retries); PR("AMPDUs XRetried: ", a_xretries); @@ -618,6 +557,223 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, PR("DELIM Underrun: ", delim_underrun); PR("TX-Pkts-All: ", tx_pkts_all); PR("TX-Bytes-All: ", tx_bytes_all); + PR("hw-put-tx-buf: ", puttxbuf); + PR("hw-tx-start: ", txstart); + PR("hw-tx-proc-desc: ", txprocdesc); + len += snprintf(buf + len, size - len, + "%s%11p%11p%10p%10p\n", "txq-memory-address:", + sc->tx.txq_map[WME_AC_BE], + sc->tx.txq_map[WME_AC_BK], + 
sc->tx.txq_map[WME_AC_VI], + sc->tx.txq_map[WME_AC_VO]); + if (len >= size) + goto done; + + PRX("axq-qnum: ", axq_qnum); + PRX("axq-depth: ", axq_depth); + PRX("axq-ampdu_depth: ", axq_ampdu_depth); + PRX("axq-stopped ", stopped); + PRX("tx-in-progress ", axq_tx_inprogress); + PRX("pending-frames ", pending_frames); + PRX("txq_headidx: ", txq_headidx); + PRX("txq_tailidx: ", txq_headidx); + + PRQLE("axq_q empty: ", axq_q); + PRQLE("axq_acq empty: ", axq_acq); + PRQLE("txq_fifo_pending: ", txq_fifo_pending); + for (i = 0; i < ATH_TXFIFO_DEPTH; i++) { + snprintf(tmp, sizeof(tmp) - 1, "txq_fifo[%i] empty: ", i); + PRQLE(tmp, txq_fifo[i]); + } + + /* Print out more detailed queue-info */ + for (i = 0; i <= WME_AC_BK; i++) { + struct ath_txq *txq = &(sc->tx.txq[i]); + struct ath_atx_ac *ac; + struct ath_atx_tid *tid; + if (len >= size) + goto done; + spin_lock_bh(&txq->axq_lock); + if (!list_empty(&txq->axq_acq)) { + ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, + list); + len += snprintf(buf + len, size - len, + "txq[%i] first-ac: %p sched: %i\n", + i, ac, ac->sched); + if (list_empty(&ac->tid_q) || (len >= size)) + goto done_for; + tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, + list); + len += snprintf(buf + len, size - len, + " first-tid: %p sched: %i paused: %i\n", + tid, tid->sched, tid->paused); + } + done_for: + spin_unlock_bh(&txq->axq_lock); + } + +done: + if (len > size) + len = size; + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + +static ssize_t read_file_stations(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + char *buf; + unsigned int len = 0, size = 64000; + struct ath_node *an = NULL; + ssize_t retval = 0; + int q; + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + len += snprintf(buf + len, size - len, + "Stations:\n" + " tid: addr sched paused buf_q-empty an ac\n" + " ac: addr sched tid_q-empty txq\n"); + + spin_lock(&sc->nodes_lock); + list_for_each_entry(an, &sc->nodes, list) { + len += snprintf(buf + len, size - len, + "%pM\n", an->sta->addr); + if (len >= size) + goto done; + + for (q = 0; q < WME_NUM_TID; q++) { + struct ath_atx_tid *tid = &(an->tid[q]); + len += snprintf(buf + len, size - len, + " tid: %p %s %s %i %p %p\n", + tid, tid->sched ? "sched" : "idle", + tid->paused ? "paused" : "running", + list_empty(&tid->buf_q), + tid->an, tid->ac); + if (len >= size) + goto done; + } + + for (q = 0; q < WME_NUM_AC; q++) { + struct ath_atx_ac *ac = &(an->ac[q]); + len += snprintf(buf + len, size - len, + " ac: %p %s %i %p\n", + ac, ac->sched ? 
"sched" : "idle", + list_empty(&ac->tid_q), ac->txq); + if (len >= size) + goto done; + } + } + +done: + spin_unlock(&sc->nodes_lock); + if (len > size) + len = size; + + retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return retval; +} + +static ssize_t read_file_misc(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath_softc *sc = file->private_data; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ath_hw *ah = sc->sc_ah; + struct ieee80211_hw *hw = sc->hw; + char *buf; + unsigned int len = 0, size = 8000; + ssize_t retval = 0; + const char *tmp; + unsigned int reg; + struct ath9k_vif_iter_data iter_data; + + ath9k_calculate_iter_data(hw, NULL, &iter_data); + + buf = kzalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + switch (sc->sc_ah->opmode) { + case NL80211_IFTYPE_ADHOC: + tmp = "ADHOC"; + break; + case NL80211_IFTYPE_MESH_POINT: + tmp = "MESH"; + break; + case NL80211_IFTYPE_AP: + tmp = "AP"; + break; + case NL80211_IFTYPE_STATION: + tmp = "STATION"; + break; + default: + tmp = "???"; + break; + } + + len += snprintf(buf + len, size - len, + "curbssid: %pM\n" + "OP-Mode: %s(%i)\n" + "Beacon-Timer-Register: 0x%x\n", + common->curbssid, + tmp, (int)(sc->sc_ah->opmode), + REG_READ(ah, AR_BEACON_PERIOD)); + + reg = REG_READ(ah, AR_TIMER_MODE); + len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (", + reg); + if (reg & AR_TBTT_TIMER_EN) + len += snprintf(buf + len, size - len, "TBTT "); + if (reg & AR_DBA_TIMER_EN) + len += snprintf(buf + len, size - len, "DBA "); + if (reg & AR_SWBA_TIMER_EN) + len += snprintf(buf + len, size - len, "SWBA "); + if (reg & AR_HCF_TIMER_EN) + len += snprintf(buf + len, size - len, "HCF "); + if (reg & AR_TIM_TIMER_EN) + len += snprintf(buf + len, size - len, "TIM "); + if (reg & AR_DTIM_TIMER_EN) + len += snprintf(buf + len, size - len, "DTIM "); + len += snprintf(buf + len, size - len, ")\n"); + + reg = sc->sc_ah->imask; + len += snprintf(buf + len, size - len, "imask: 0x%x (", reg); + if (reg & ATH9K_INT_SWBA) + len += snprintf(buf + len, size - len, "SWBA "); + if (reg & ATH9K_INT_BMISS) + len += snprintf(buf + len, size - len, "BMISS "); + if (reg & ATH9K_INT_CST) + len += snprintf(buf + len, size - len, "CST "); + if (reg & ATH9K_INT_RX) + len += snprintf(buf + len, size - len, "RX "); + if (reg & ATH9K_INT_RXHP) + len += snprintf(buf + len, size - len, "RXHP "); + if (reg & ATH9K_INT_RXLP) + len += snprintf(buf + len, size - len, "RXLP "); + if (reg & ATH9K_INT_BB_WATCHDOG) + len += snprintf(buf + len, size - len, "BB_WATCHDOG "); + /* there are other IRQs if one wanted to add them. 
*/ + len += snprintf(buf + len, size - len, ")\n"); + + len += snprintf(buf + len, size - len, + "VIF Counts: AP: %i STA: %i MESH: %i WDS: %i" + " ADHOC: %i OTHER: %i nvifs: %hi beacon-vifs: %hi\n", + iter_data.naps, iter_data.nstations, iter_data.nmeshes, + iter_data.nwds, iter_data.nadhocs, iter_data.nothers, + sc->nvifs, sc->nbcnvifs); + + len += snprintf(buf + len, size - len, + "Calculated-BSSID-Mask: %pM\n", + iter_data.mask); if (len > size) len = size; @@ -629,9 +785,9 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, } void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, - struct ath_tx_status *ts) + struct ath_tx_status *ts, struct ath_txq *txq) { - int qnum = skb_get_queue_mapping(bf->bf_mpdu); + int qnum = txq->axq_qnum; TX_STAT_INC(qnum, tx_pkts_all); sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len; @@ -666,6 +822,20 @@ static const struct file_operations fops_xmit = { .llseek = default_llseek, }; +static const struct file_operations fops_stations = { + .read = read_file_stations, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static const struct file_operations fops_misc = { + .read = read_file_misc, + .open = ath9k_debugfs_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + static ssize_t read_file_recv(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -871,6 +1041,42 @@ static const struct file_operations fops_regval = { .llseek = default_llseek, }; +#define REGDUMP_LINE_SIZE 20 + +static int open_file_regdump(struct inode *inode, struct file *file) +{ + struct ath_softc *sc = inode->i_private; + unsigned int len = 0; + u8 *buf; + int i; + unsigned long num_regs, regdump_len, max_reg_offset; + + max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 
0x16bd4 : 0xb500; + num_regs = max_reg_offset / 4 + 1; + regdump_len = num_regs * REGDUMP_LINE_SIZE + 1; + buf = vmalloc(regdump_len); + if (!buf) + return -ENOMEM; + + ath9k_ps_wakeup(sc); + for (i = 0; i < num_regs; i++) + len += scnprintf(buf + len, regdump_len - len, + "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2)); + ath9k_ps_restore(sc); + + file->private_data = buf; + + return 0; +} + +static const struct file_operations fops_regdump = { + .open = open_file_regdump, + .read = ath9k_debugfs_read_buf, + .release = ath9k_debugfs_release_buf, + .owner = THIS_MODULE, + .llseek = default_llseek,/* read accesses f_pos */ +}; + int ath9k_init_debug(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); @@ -903,6 +1109,14 @@ int ath9k_init_debug(struct ath_hw *ah) sc, &fops_xmit)) goto err; + if (!debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, + sc, &fops_stations)) + goto err; + + if (!debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, + sc, &fops_misc)) + goto err; + if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_recv)) goto err; @@ -927,6 +1141,10 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca)) goto err; + if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, + sc, &fops_regdump)) + goto err; + sc->debug.regidx = 0; return 0; err: diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 1e5078bd0344..59338de0ce19 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -89,7 +89,8 @@ struct ath_interrupt_stats { * @queued: Total MPDUs (non-aggr) queued * @completed: Total MPDUs (non-aggr) completed * @a_aggr: Total no. of aggregates queued - * @a_queued: Total AMPDUs queued + * @a_queued_hw: Total AMPDUs queued to hardware + * @a_queued_sw: Total AMPDUs queued to software queues * @a_completed: Total AMPDUs completed * @a_retries: No. of AMPDUs retried (SW) * @a_xretries: No. of AMPDUs dropped due to xretries @@ -102,6 +103,9 @@ struct ath_interrupt_stats { * @desc_cfg_err: Descriptor configuration errors * @data_urn: TX data underrun errors * @delim_urn: TX delimiter underrun errors + * @puttxbuf: Number of times hardware was given txbuf to write. + * @txstart: Number of times hardware was told to start tx. 
+ * @txprocdesc: Number of times tx descriptor was processed */ struct ath_tx_stats { u32 tx_pkts_all; @@ -109,7 +113,8 @@ struct ath_tx_stats { u32 queued; u32 completed; u32 a_aggr; - u32 a_queued; + u32 a_queued_hw; + u32 a_queued_sw; u32 a_completed; u32 a_retries; u32 a_xretries; @@ -119,6 +124,9 @@ struct ath_tx_stats { u32 desc_cfg_err; u32 data_underrun; u32 delim_underrun; + u32 puttxbuf; + u32 txstart; + u32 txprocdesc; }; /** @@ -167,7 +175,7 @@ int ath9k_init_debug(struct ath_hw *ah); void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, - struct ath_tx_status *ts); + struct ath_tx_status *ts, struct ath_txq *txq); void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs); #else @@ -184,7 +192,8 @@ static inline void ath_debug_stat_interrupt(struct ath_softc *sc, static inline void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, - struct ath_tx_status *ts) + struct ath_tx_status *ts, + struct ath_txq *txq) { } diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index d05163159572..8c18bed3a558 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -89,6 +89,38 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, return false; } +void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, + int eep_start_loc, int size) +{ + int i = 0, j, addr; + u32 addrdata[8]; + u32 data[8]; + + for (addr = 0; addr < size; addr++) { + addrdata[i] = AR5416_EEPROM_OFFSET + + ((addr + eep_start_loc) << AR5416_EEPROM_S); + i++; + if (i == 8) { + REG_READ_MULTI(ah, addrdata, data, i); + + for (j = 0; j < i; j++) { + *eep_data = data[j]; + eep_data++; + } + i = 0; + } + } + + if (i != 0) { + REG_READ_MULTI(ah, addrdata, data, i); + + for (j = 0; j < i; j++) { + *eep_data = data[j]; + eep_data++; + } + } +} + bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data) { return common->bus_ops->eeprom_read(common, off, data); diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 58e2ddc927a9..bd82447f5b78 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -665,6 +665,8 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, u16 *indexL, u16 *indexR); bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data); +void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, + int eep_start_loc, int size); void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, u8 *pVpdList, u16 numIntercepts, u8 *pRetVpdList); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index fbdff7e47952..bc77a308c901 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -27,19 +27,13 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); } -static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) -{ #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) + +static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) +{ struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data = (u16 *)&ah->eeprom.map4k; - int addr, eep_start_loc = 0; - - eep_start_loc = 64; - - if (!ath9k_hw_use_flash(ah)) { - 
ath_dbg(common, ATH_DBG_EEPROM, - "Reading from EEPROM, not flash\n"); - } + int addr, eep_start_loc = 64; for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { @@ -51,9 +45,34 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) } return true; -#undef SIZE_EEPROM_4K } +static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah) +{ + u16 *eep_data = (u16 *)&ah->eeprom.map4k; + + ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K); + + return true; +} + +static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (!ath9k_hw_use_flash(ah)) { + ath_dbg(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); + } + + if (common->bus_ops->ath_bus_type == ATH_USB) + return __ath9k_hw_usb_4k_fill_eeprom(ah); + else + return __ath9k_hw_4k_fill_eeprom(ah); +} + +#undef SIZE_EEPROM_4K + static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) { #define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 9b6bc8a953bc..8cd8333cc086 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -17,7 +17,7 @@ #include "hw.h" #include "ar9002_phy.h" -#define NUM_EEP_WORDS (sizeof(struct ar9287_eeprom) / sizeof(u16)) +#define SIZE_EEPROM_AR9287 (sizeof(struct ar9287_eeprom) / sizeof(u16)) static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah) { @@ -29,25 +29,15 @@ static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah) return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF; } -static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) +static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) { struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data; - int addr, eep_start_loc; + int addr, eep_start_loc = AR9287_EEP_START_LOC; eep_data = (u16 *)eep; - if (common->bus_ops->ath_bus_type == ATH_USB) - eep_start_loc = AR9287_HTC_EEP_START_LOC; - else - eep_start_loc = AR9287_EEP_START_LOC; - - if (!ath9k_hw_use_flash(ah)) { - ath_dbg(common, ATH_DBG_EEPROM, - "Reading from EEPROM, not flash\n"); - } - - for (addr = 0; addr < NUM_EEP_WORDS; addr++) { + for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { ath_dbg(common, ATH_DBG_EEPROM, @@ -60,6 +50,31 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) return true; } +static bool __ath9k_hw_usb_ar9287_fill_eeprom(struct ath_hw *ah) +{ + u16 *eep_data = (u16 *)&ah->eeprom.map9287; + + ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, + AR9287_HTC_EEP_START_LOC, + SIZE_EEPROM_AR9287); + return true; +} + +static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (!ath9k_hw_use_flash(ah)) { + ath_dbg(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); + } + + if (common->bus_ops->ath_bus_type == ATH_USB) + return __ath9k_hw_usb_ar9287_fill_eeprom(ah); + else + return __ath9k_hw_ar9287_fill_eeprom(ah); +} + static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) { u32 sum = 0, el, integer; @@ -86,7 +101,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah) need_swap = true; eepdata = (u16 *)(&ah->eeprom); - for (addr = 0; addr < NUM_EEP_WORDS; addr++) { + for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) { temp = swab16(*eepdata); 
*eepdata = temp; eepdata++; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 749a93608664..fccd87df7300 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -86,9 +86,10 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah) return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF); } -static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) -{ #define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) + +static bool __ath9k_hw_def_fill_eeprom(struct ath_hw *ah) +{ struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data = (u16 *)&ah->eeprom.def; int addr, ar5416_eep_start_loc = 0x100; @@ -103,9 +104,34 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) eep_data++; } return true; -#undef SIZE_EEPROM_DEF } +static bool __ath9k_hw_usb_def_fill_eeprom(struct ath_hw *ah) +{ + u16 *eep_data = (u16 *)&ah->eeprom.def; + + ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, + 0x100, SIZE_EEPROM_DEF); + return true; +} + +static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) +{ + struct ath_common *common = ath9k_hw_common(ah); + + if (!ath9k_hw_use_flash(ah)) { + ath_dbg(common, ATH_DBG_EEPROM, + "Reading from EEPROM, not flash\n"); + } + + if (common->bus_ops->ath_bus_type == ATH_USB) + return __ath9k_hw_usb_def_fill_eeprom(ah); + else + return __ath9k_hw_def_fill_eeprom(ah); +} + +#undef SIZE_EEPROM_DEF + static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) { struct ar5416_eeprom_def *eep = @@ -221,9 +247,9 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah) } /* Enable fixup for AR_AN_TOP2 if necessary */ - if (AR_SREV_9280_20_OR_LATER(ah) && - (eep->baseEepHeader.version & 0xff) > 0x0a && - eep->baseEepHeader.pwdclkind == 0) + if ((ah->hw_version.devid == AR9280_DEVID_PCI) && + ((eep->baseEepHeader.version & 0xff) > 0x0a) && + (eep->baseEepHeader.pwdclkind == 0)) ah->need_an_top2_fixup = 1; if ((common->bus_ops->ath_bus_type == ATH_USB) && diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index 133764069246..0fb8f8ac275a 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -20,121 +20,31 @@ /* LED functions */ /********************************/ -static void ath_led_blink_work(struct work_struct *work) -{ - struct ath_softc *sc = container_of(work, struct ath_softc, - ath_led_blink_work.work); - - if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED)) - return; - - if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) || - (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE)) - ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); - else - ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, - (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0); - - ieee80211_queue_delayed_work(sc->hw, - &sc->ath_led_blink_work, - (sc->sc_flags & SC_OP_LED_ON) ? - msecs_to_jiffies(sc->led_off_duration) : - msecs_to_jiffies(sc->led_on_duration)); - - sc->led_on_duration = sc->led_on_cnt ? - max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) : - ATH_LED_ON_DURATION_IDLE; - sc->led_off_duration = sc->led_off_cnt ? 
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) : - ATH_LED_OFF_DURATION_IDLE; - sc->led_on_cnt = sc->led_off_cnt = 0; - if (sc->sc_flags & SC_OP_LED_ON) - sc->sc_flags &= ~SC_OP_LED_ON; - else - sc->sc_flags |= SC_OP_LED_ON; -} - +#ifdef CONFIG_MAC80211_LEDS static void ath_led_brightness(struct led_classdev *led_cdev, enum led_brightness brightness) { - struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev); - struct ath_softc *sc = led->sc; - - switch (brightness) { - case LED_OFF: - if (led->led_type == ATH_LED_ASSOC || - led->led_type == ATH_LED_RADIO) { - ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, - (led->led_type == ATH_LED_RADIO)); - sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; - if (led->led_type == ATH_LED_RADIO) - sc->sc_flags &= ~SC_OP_LED_ON; - } else { - sc->led_off_cnt++; - } - break; - case LED_FULL: - if (led->led_type == ATH_LED_ASSOC) { - sc->sc_flags |= SC_OP_LED_ASSOCIATED; - if (led_blink) - ieee80211_queue_delayed_work(sc->hw, - &sc->ath_led_blink_work, 0); - } else if (led->led_type == ATH_LED_RADIO) { - ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0); - sc->sc_flags |= SC_OP_LED_ON; - } else { - sc->led_on_cnt++; - } - break; - default: - break; - } -} - -static int ath_register_led(struct ath_softc *sc, struct ath_led *led, - char *trigger) -{ - int ret; - - led->sc = sc; - led->led_cdev.name = led->name; - led->led_cdev.default_trigger = trigger; - led->led_cdev.brightness_set = ath_led_brightness; - - ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev); - if (ret) - ath_err(ath9k_hw_common(sc->sc_ah), - "Failed to register led:%s", led->name); - else - led->registered = 1; - return ret; -} - -static void ath_unregister_led(struct ath_led *led) -{ - if (led->registered) { - led_classdev_unregister(&led->led_cdev); - led->registered = 0; - } + struct ath_softc *sc = container_of(led_cdev, struct ath_softc, led_cdev); + ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, (brightness == LED_OFF)); } void ath_deinit_leds(struct ath_softc *sc) { - ath_unregister_led(&sc->assoc_led); - sc->sc_flags &= ~SC_OP_LED_ASSOCIATED; - ath_unregister_led(&sc->tx_led); - ath_unregister_led(&sc->rx_led); - ath_unregister_led(&sc->radio_led); - ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); + if (!sc->led_registered) + return; + + ath_led_brightness(&sc->led_cdev, LED_OFF); + led_classdev_unregister(&sc->led_cdev); } void ath_init_leds(struct ath_softc *sc) { - char *trigger; int ret; if (AR_SREV_9287(sc->sc_ah)) sc->sc_ah->led_pin = ATH_LED_PIN_9287; + else if (AR_SREV_9485(sc->sc_ah)) + sc->sc_ah->led_pin = ATH_LED_PIN_9485; else sc->sc_ah->led_pin = ATH_LED_PIN_DEF; @@ -144,48 +54,22 @@ void ath_init_leds(struct ath_softc *sc) /* LED off, active low */ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); - if (led_blink) - INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work); - - trigger = ieee80211_get_radio_led_name(sc->hw); - snprintf(sc->radio_led.name, sizeof(sc->radio_led.name), - "ath9k-%s::radio", wiphy_name(sc->hw->wiphy)); - ret = ath_register_led(sc, &sc->radio_led, trigger); - sc->radio_led.led_type = ATH_LED_RADIO; - if (ret) - goto fail; - - trigger = ieee80211_get_assoc_led_name(sc->hw); - snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name), - "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy)); - ret = ath_register_led(sc, &sc->assoc_led, trigger); - sc->assoc_led.led_type = ATH_LED_ASSOC; - if (ret) - goto fail; - - trigger = ieee80211_get_tx_led_name(sc->hw); - snprintf(sc->tx_led.name, 
sizeof(sc->tx_led.name), - "ath9k-%s::tx", wiphy_name(sc->hw->wiphy)); - ret = ath_register_led(sc, &sc->tx_led, trigger); - sc->tx_led.led_type = ATH_LED_TX; - if (ret) - goto fail; - - trigger = ieee80211_get_rx_led_name(sc->hw); - snprintf(sc->rx_led.name, sizeof(sc->rx_led.name), - "ath9k-%s::rx", wiphy_name(sc->hw->wiphy)); - ret = ath_register_led(sc, &sc->rx_led, trigger); - sc->rx_led.led_type = ATH_LED_RX; - if (ret) - goto fail; - - return; - -fail: - if (led_blink) - cancel_delayed_work_sync(&sc->ath_led_blink_work); - ath_deinit_leds(sc); + if (!led_blink) + sc->led_cdev.default_trigger = + ieee80211_get_radio_led_name(sc->hw); + + snprintf(sc->led_name, sizeof(sc->led_name), + "ath9k-%s", wiphy_name(sc->hw->wiphy)); + sc->led_cdev.name = sc->led_name; + sc->led_cdev.brightness_set = ath_led_brightness; + + ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &sc->led_cdev); + if (ret < 0) + return; + + sc->led_registered = true; } +#endif /*******************/ /* Rfkill */ @@ -201,8 +85,7 @@ static bool ath_is_rfkill_set(struct ath_softc *sc) void ath9k_rfkill_poll_state(struct ieee80211_hw *hw) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; bool blocked = !!ath_is_rfkill_set(sc); wiphy_rfkill_set_hw_state(hw->wiphy, blocked); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 07b1633b7f3f..f1b8af64569c 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -52,6 +52,9 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { { USB_DEVICE(0x083A, 0xA704), .driver_info = AR9280_USB }, /* SMC Networks */ + { USB_DEVICE(0x0cf3, 0x20ff), + .driver_info = STORAGE_DEVICE }, + { }, }; @@ -914,13 +917,11 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info) if (ret) { dev_err(&hif_dev->udev->dev, "ath9k_htc: Unable to allocate URBs\n"); - goto err_urb; + goto err_fw_download; } return 0; -err_urb: - ath9k_hif_usb_dealloc_urbs(hif_dev); err_fw_download: release_firmware(hif_dev->firmware); err_fw_req: @@ -935,6 +936,61 @@ static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev) release_firmware(hif_dev->firmware); } +/* + * An exact copy of the function from zd1211rw. 
+ */ +static int send_eject_command(struct usb_interface *interface) +{ + struct usb_device *udev = interface_to_usbdev(interface); + struct usb_host_interface *iface_desc = &interface->altsetting[0]; + struct usb_endpoint_descriptor *endpoint; + unsigned char *cmd; + u8 bulk_out_ep; + int r; + + /* Find bulk out endpoint */ + for (r = 1; r >= 0; r--) { + endpoint = &iface_desc->endpoint[r].desc; + if (usb_endpoint_dir_out(endpoint) && + usb_endpoint_xfer_bulk(endpoint)) { + bulk_out_ep = endpoint->bEndpointAddress; + break; + } + } + if (r == -1) { + dev_err(&udev->dev, + "ath9k_htc: Could not find bulk out endpoint\n"); + return -ENODEV; + } + + cmd = kzalloc(31, GFP_KERNEL); + if (cmd == NULL) + return -ENODEV; + + /* USB bulk command block */ + cmd[0] = 0x55; /* bulk command signature */ + cmd[1] = 0x53; /* bulk command signature */ + cmd[2] = 0x42; /* bulk command signature */ + cmd[3] = 0x43; /* bulk command signature */ + cmd[14] = 6; /* command length */ + + cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */ + cmd[19] = 0x2; /* eject disc */ + + dev_info(&udev->dev, "Ejecting storage device...\n"); + r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep), + cmd, 31, NULL, 2000); + kfree(cmd); + if (r) + return r; + + /* At this point, the device disconnects and reconnects with the real + * ID numbers. */ + + usb_set_intfdata(interface, NULL); + return 0; +} + static int ath9k_hif_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { @@ -942,6 +998,9 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, struct hif_device_usb *hif_dev; int ret = 0; + if (id->driver_info == STORAGE_DEVICE) + return send_eject_command(interface); + hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL); if (!hif_dev) { ret = -ENOMEM; @@ -1028,12 +1087,13 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) struct hif_device_usb *hif_dev = usb_get_intfdata(interface); bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? 
true : false; - if (hif_dev) { - ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); - ath9k_htc_hw_free(hif_dev->htc_handle); - ath9k_hif_usb_dev_deinit(hif_dev); - usb_set_intfdata(interface, NULL); - } + if (!hif_dev) + return; + + ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); + ath9k_htc_hw_free(hif_dev->htc_handle); + ath9k_hif_usb_dev_deinit(hif_dev); + usb_set_intfdata(interface, NULL); if (!unplugged && (hif_dev->flags & HIF_USB_START)) ath9k_hif_usb_reboot(udev); diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 780ac5eac501..753a245c5ad1 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -32,6 +32,7 @@ #include "wmi.h" #define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */ +#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */ #define ATH_ANI_POLLINTERVAL 100 /* 100 ms */ #define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ #define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ @@ -204,8 +205,50 @@ struct ath9k_htc_target_stats { __be32 ht_tx_xretries; } __packed; +#define ATH9K_HTC_MAX_VIF 2 +#define ATH9K_HTC_MAX_BCN_VIF 2 + +#define INC_VIF(_priv, _type) do { \ + switch (_type) { \ + case NL80211_IFTYPE_STATION: \ + _priv->num_sta_vif++; \ + break; \ + case NL80211_IFTYPE_ADHOC: \ + _priv->num_ibss_vif++; \ + break; \ + case NL80211_IFTYPE_AP: \ + _priv->num_ap_vif++; \ + break; \ + default: \ + break; \ + } \ + } while (0) + +#define DEC_VIF(_priv, _type) do { \ + switch (_type) { \ + case NL80211_IFTYPE_STATION: \ + _priv->num_sta_vif--; \ + break; \ + case NL80211_IFTYPE_ADHOC: \ + _priv->num_ibss_vif--; \ + break; \ + case NL80211_IFTYPE_AP: \ + _priv->num_ap_vif--; \ + break; \ + default: \ + break; \ + } \ + } while (0) + struct ath9k_htc_vif { u8 index; + u16 seq_no; + bool beacon_configured; +}; + +struct ath9k_vif_iter_data { + const u8 *hw_macaddr; + u8 mask[ETH_ALEN]; }; #define ATH9K_HTC_MAX_STA 8 @@ -310,10 +353,8 @@ struct ath_led { struct htc_beacon_config { u16 beacon_interval; - u16 listen_interval; u16 dtim_period; u16 bmiss_timeout; - u8 dtim_count; }; struct ath_btcoex { @@ -333,13 +374,12 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv); #define OP_SCANNING BIT(1) #define OP_LED_ASSOCIATED BIT(2) #define OP_LED_ON BIT(3) -#define OP_PREAMBLE_SHORT BIT(4) -#define OP_PROTECT_ENABLE BIT(5) -#define OP_ASSOCIATED BIT(6) -#define OP_ENABLE_BEACON BIT(7) -#define OP_LED_DEINIT BIT(8) -#define OP_BT_PRIORITY_DETECTED BIT(9) -#define OP_BT_SCAN BIT(10) +#define OP_ENABLE_BEACON BIT(4) +#define OP_LED_DEINIT BIT(5) +#define OP_BT_PRIORITY_DETECTED BIT(6) +#define OP_BT_SCAN BIT(7) +#define OP_ANI_RUNNING BIT(8) +#define OP_TSF_RESET BIT(9) struct ath9k_htc_priv { struct device *dev; @@ -358,15 +398,24 @@ struct ath9k_htc_priv { enum htc_endpoint_id data_vi_ep; enum htc_endpoint_id data_vo_ep; + u8 vif_slot; + u8 mon_vif_idx; + u8 sta_slot; + u8 vif_sta_pos[ATH9K_HTC_MAX_VIF]; + u8 num_ibss_vif; + u8 num_sta_vif; + u8 num_ap_vif; + u16 op_flags; u16 curtxpow; u16 txpowlimit; u16 nvifs; u16 nstations; - u16 seq_no; u32 bmiss_cnt; + bool rearm_ani; + bool reconfig_beacon; - struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS]; + struct ath9k_hw_cal_data caldata; spinlock_t beacon_lock; @@ -382,7 +431,7 @@ struct ath9k_htc_priv { struct ath9k_htc_rx rx; struct tasklet_struct tx_tasklet; struct sk_buff_head tx_queue; - struct delayed_work ath9k_ani_work; + struct delayed_work ani_work; struct work_struct ps_work; struct work_struct fatal_work; @@ -424,6 +473,7 
@@ void ath9k_htc_reset(struct ath9k_htc_priv *priv); void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv); void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif); +void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv); void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending); void ath9k_htc_rxep(void *priv, struct sk_buff *skb, @@ -436,8 +486,9 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb, int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv); void ath9k_htc_station_work(struct work_struct *work); void ath9k_htc_aggr_work(struct work_struct *work); -void ath9k_ani_work(struct work_struct *work);; -void ath_start_ani(struct ath9k_htc_priv *priv); +void ath9k_htc_ani_work(struct work_struct *work); +void ath9k_htc_start_ani(struct ath9k_htc_priv *priv); +void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv); int ath9k_tx_init(struct ath9k_htc_priv *priv); void ath9k_tx_tasklet(unsigned long data); @@ -460,7 +511,6 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv); void ath9k_ps_work(struct work_struct *work); bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, enum ath9k_power_mode mode); -void ath_update_txpow(struct ath9k_htc_priv *priv); void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index 87cc65a78a3f..8d1d8792436d 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -123,8 +123,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv, /* TSF out of range threshold fixed at 1 second */ bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; - ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); - ath_dbg(common, ATH_DBG_BEACON, + ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n", + intval, tsf, tsftu); + ath_dbg(common, ATH_DBG_CONFIG, "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", bs.bs_bmissthreshold, bs.bs_sleepduration, bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); @@ -138,25 +139,81 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv, WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask); } +static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv, + struct htc_beacon_config *bss_conf) +{ + struct ath_common *common = ath9k_hw_common(priv->ah); + enum ath9k_int imask = 0; + u32 nexttbtt, intval, tsftu; + __be32 htc_imask = 0; + int ret; + u8 cmd_rsp; + u64 tsf; + + intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD; + intval /= ATH9K_HTC_MAX_BCN_VIF; + nexttbtt = intval; + + if (priv->op_flags & OP_TSF_RESET) { + intval |= ATH9K_BEACON_RESET_TSF; + priv->op_flags &= ~OP_TSF_RESET; + } else { + /* + * Pull nexttbtt forward to reflect the current TSF. 
+ */ + tsf = ath9k_hw_gettsf64(priv->ah); + tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE; + do { + nexttbtt += intval; + } while (nexttbtt < tsftu); + } + + intval |= ATH9K_BEACON_ENA; + + if (priv->op_flags & OP_ENABLE_BEACON) + imask |= ATH9K_INT_SWBA; + + ath_dbg(common, ATH_DBG_CONFIG, + "AP Beacon config, intval: %d, nexttbtt: %u imask: 0x%x\n", + bss_conf->beacon_interval, nexttbtt, imask); + + WMI_CMD(WMI_DISABLE_INTR_CMDID); + ath9k_hw_beaconinit(priv->ah, nexttbtt, intval); + priv->bmiss_cnt = 0; + htc_imask = cpu_to_be32(imask); + WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask); +} + static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv, struct htc_beacon_config *bss_conf) { struct ath_common *common = ath9k_hw_common(priv->ah); enum ath9k_int imask = 0; - u32 nexttbtt, intval; + u32 nexttbtt, intval, tsftu; __be32 htc_imask = 0; int ret; u8 cmd_rsp; + u64 tsf; intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD; nexttbtt = intval; + + /* + * Pull nexttbtt forward to reflect the current TSF. + */ + tsf = ath9k_hw_gettsf64(priv->ah); + tsftu = TSF_TO_TU(tsf >> 32, tsf) + FUDGE; + do { + nexttbtt += intval; + } while (nexttbtt < tsftu); + intval |= ATH9K_BEACON_ENA; if (priv->op_flags & OP_ENABLE_BEACON) imask |= ATH9K_INT_SWBA; - ath_dbg(common, ATH_DBG_BEACON, - "IBSS Beacon config, intval: %d, imask: 0x%x\n", - bss_conf->beacon_interval, imask); + ath_dbg(common, ATH_DBG_CONFIG, + "IBSS Beacon config, intval: %d, nexttbtt: %u, imask: 0x%x\n", + bss_conf->beacon_interval, nexttbtt, imask); WMI_CMD(WMI_DISABLE_INTR_CMDID); ath9k_hw_beaconinit(priv->ah, nexttbtt, intval); @@ -207,9 +264,9 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending) if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) beacon->data; - priv->seq_no += 0x10; + avp->seq_no += 0x10; hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); - hdr->seq_ctrl |= cpu_to_le16(priv->seq_no); + hdr->seq_ctrl |= cpu_to_le16(avp->seq_no); } tx_ctl.type = ATH9K_HTC_NORMAL; @@ -253,30 +310,123 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv) } } +static void ath9k_htc_beacon_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + bool *beacon_configured = (bool *)data; + struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; + + if (vif->type == NL80211_IFTYPE_STATION && + avp->beacon_configured) + *beacon_configured = true; +} + +static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv, + struct ieee80211_vif *vif) +{ + struct ath_common *common = ath9k_hw_common(priv->ah); + struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + bool beacon_configured; + + /* + * Changing the beacon interval when multiple AP interfaces + * are configured will affect beacon transmission of all + * of them. + */ + if ((priv->ah->opmode == NL80211_IFTYPE_AP) && + (priv->num_ap_vif > 1) && + (vif->type == NL80211_IFTYPE_AP) && + (cur_conf->beacon_interval != bss_conf->beacon_int)) { + ath_dbg(common, ATH_DBG_CONFIG, + "Changing beacon interval of multiple AP interfaces !\n"); + return false; + } + + /* + * If the HW is operating in AP mode, any new station interfaces that + * are added cannot change the beacon parameters. 
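Both the AP and IBSS beacon configuration paths above advance nexttbtt with the same do/while pattern. Pulled out as a standalone sketch (hypothetical helper, not driver code), assuming intval is non-zero, which the driver guarantees by defaulting a zero beacon interval to 100 TU:

/* With intval = 100 TU and tsftu = 1234 TU this returns 1300 TU: the
 * first beacon slot at least one full interval past the current TSF. */
static u32 advance_nexttbtt(u32 tsftu, u32 intval)
{
	u32 nexttbtt = intval;

	do {
		nexttbtt += intval;
	} while (nexttbtt < tsftu);

	return nexttbtt;
}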
+ */ + if (priv->num_ap_vif && + (vif->type != NL80211_IFTYPE_AP)) { + ath_dbg(common, ATH_DBG_CONFIG, + "HW in AP mode, cannot set STA beacon parameters\n"); + return false; + } + + /* + * The beacon parameters are configured only for the first + * station interface. + */ + if ((priv->ah->opmode == NL80211_IFTYPE_STATION) && + (priv->num_sta_vif > 1) && + (vif->type == NL80211_IFTYPE_STATION)) { + beacon_configured = false; + ieee80211_iterate_active_interfaces_atomic(priv->hw, + ath9k_htc_beacon_iter, + &beacon_configured); + + if (beacon_configured) { + ath_dbg(common, ATH_DBG_CONFIG, + "Beacon already configured for a station interface\n"); + return false; + } + } + + return true; +} + void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, struct ieee80211_vif *vif) { struct ath_common *common = ath9k_hw_common(priv->ah); struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; + + if (!ath9k_htc_check_beacon_config(priv, vif)) + return; cur_conf->beacon_interval = bss_conf->beacon_int; if (cur_conf->beacon_interval == 0) cur_conf->beacon_interval = 100; cur_conf->dtim_period = bss_conf->dtim_period; - cur_conf->listen_interval = 1; - cur_conf->dtim_count = 1; cur_conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; switch (vif->type) { case NL80211_IFTYPE_STATION: ath9k_htc_beacon_config_sta(priv, cur_conf); + avp->beacon_configured = true; + break; + case NL80211_IFTYPE_ADHOC: + ath9k_htc_beacon_config_adhoc(priv, cur_conf); + break; + case NL80211_IFTYPE_AP: + ath9k_htc_beacon_config_ap(priv, cur_conf); + break; + default: + ath_dbg(common, ATH_DBG_CONFIG, + "Unsupported beaconing mode\n"); + return; + } +} + +void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv) +{ + struct ath_common *common = ath9k_hw_common(priv->ah); + struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf; + + switch (priv->ah->opmode) { + case NL80211_IFTYPE_STATION: + ath9k_htc_beacon_config_sta(priv, cur_conf); break; case NL80211_IFTYPE_ADHOC: ath9k_htc_beacon_config_adhoc(priv, cur_conf); break; + case NL80211_IFTYPE_AP: + ath9k_htc_beacon_config_ap(priv, cur_conf); + break; default: ath_dbg(common, ATH_DBG_CONFIG, "Unsupported beaconing mode\n"); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c index fe70f67aa088..7e630a81b453 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c @@ -389,7 +389,8 @@ void ath9k_htc_radio_enable(struct ieee80211_hw *hw) ret, ah->curchan->channel); } - ath_update_txpow(priv); + ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit, + &priv->curtxpow); /* Start RX */ WMI_CMD(WMI_START_RECV_CMDID); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 0352f0994caa..fc67c937e172 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -294,6 +294,34 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset) return be32_to_cpu(val); } +static void ath9k_multi_regread(void *hw_priv, u32 *addr, + u32 *val, u16 count) +{ + struct ath_hw *ah = (struct ath_hw *) hw_priv; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; + __be32 tmpaddr[8]; + __be32 tmpval[8]; + int i, ret; + + for (i = 0; i < count; i++) { + 
tmpaddr[i] = cpu_to_be32(addr[i]); + } + + ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID, + (u8 *)tmpaddr , sizeof(u32) * count, + (u8 *)tmpval, sizeof(u32) * count, + 100); + if (unlikely(ret)) { + ath_dbg(common, ATH_DBG_WMI, + "Multiple REGISTER READ FAILED (count: %d)\n", count); + } + + for (i = 0; i < count; i++) { + val[i] = be32_to_cpu(tmpval[i]); + } +} + static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset) { struct ath_hw *ah = (struct ath_hw *) hw_priv; @@ -404,6 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv) static const struct ath_ops ath9k_common_ops = { .read = ath9k_regread, + .multi_read = ath9k_multi_regread, .write = ath9k_regwrite, .enable_write_buffer = ath9k_enable_regwrite_buffer, .write_flush = ath9k_regwrite_flush, @@ -650,7 +679,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, (unsigned long)priv); tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv); - INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work); + INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work); INIT_WORK(&priv->ps_work, ath9k_ps_work); INIT_WORK(&priv->fatal_work, ath9k_fatal_work); @@ -758,6 +787,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, struct ath_hw *ah; int error = 0; struct ath_regulatory *reg; + char hw_name[64]; /* Bring up device */ error = ath9k_init_priv(priv, devid, product, drv_info); @@ -798,6 +828,22 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, goto err_world; } + ath_dbg(common, ATH_DBG_CONFIG, + "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, " + "BE:%d, BK:%d, VI:%d, VO:%d\n", + priv->wmi_cmd_ep, + priv->beacon_ep, + priv->cab_ep, + priv->uapsd_ep, + priv->mgmt_ep, + priv->data_be_ep, + priv->data_bk_ep, + priv->data_vi_ep, + priv->data_vo_ep); + + ath9k_hw_name(priv->ah, hw_name, sizeof(hw_name)); + wiphy_info(hw->wiphy, "%s\n", hw_name); + ath9k_init_leds(priv); ath9k_start_rfkill_poll(priv); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 6bb59958f71e..db8c0c044e9e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -24,17 +24,6 @@ static struct dentry *ath9k_debugfs_root; /* Utilities */ /*************/ -void ath_update_txpow(struct ath9k_htc_priv *priv) -{ - struct ath_hw *ah = priv->ah; - - if (priv->curtxpow != priv->txpowlimit) { - ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false); - /* read back in case value is clamped */ - priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit; - } -} - /* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv, struct ath9k_channel *ichan) @@ -116,12 +105,88 @@ void ath9k_ps_work(struct work_struct *work) ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP); } +static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath9k_htc_priv *priv = data; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + + if ((vif->type == NL80211_IFTYPE_AP) && bss_conf->enable_beacon) + priv->reconfig_beacon = true; + + if (bss_conf->assoc) { + priv->rearm_ani = true; + priv->reconfig_beacon = true; + } +} + +static void ath9k_htc_vif_reconfig(struct ath9k_htc_priv *priv) +{ + priv->rearm_ani = false; + priv->reconfig_beacon = false; + + ieee80211_iterate_active_interfaces_atomic(priv->hw, + ath9k_htc_vif_iter, priv); + if (priv->rearm_ani) + ath9k_htc_start_ani(priv); + + if (priv->reconfig_beacon) { + 
ath9k_htc_ps_wakeup(priv); + ath9k_htc_beacon_reconfig(priv); + ath9k_htc_ps_restore(priv); + } +} + +static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath9k_vif_iter_data *iter_data = data; + int i; + + for (i = 0; i < ETH_ALEN; i++) + iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); +} + +static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, + struct ieee80211_vif *vif) +{ + struct ath_common *common = ath9k_hw_common(priv->ah); + struct ath9k_vif_iter_data iter_data; + + /* + * Use the hardware MAC address as reference, the hardware uses it + * together with the BSSID mask when matching addresses. + */ + iter_data.hw_macaddr = common->macaddr; + memset(&iter_data.mask, 0xff, ETH_ALEN); + + if (vif) + ath9k_htc_bssid_iter(&iter_data, vif->addr, vif); + + /* Get list of all active MAC addresses */ + ieee80211_iterate_active_interfaces_atomic(priv->hw, ath9k_htc_bssid_iter, + &iter_data); + + memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); + ath_hw_setbssidmask(common); +} + +static void ath9k_htc_set_opmode(struct ath9k_htc_priv *priv) +{ + if (priv->num_ibss_vif) + priv->ah->opmode = NL80211_IFTYPE_ADHOC; + else if (priv->num_ap_vif) + priv->ah->opmode = NL80211_IFTYPE_AP; + else + priv->ah->opmode = NL80211_IFTYPE_STATION; + + ath9k_hw_setopmode(priv->ah); +} + void ath9k_htc_reset(struct ath9k_htc_priv *priv) { struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_channel *channel = priv->hw->conf.channel; - struct ath9k_hw_cal_data *caldata; + struct ath9k_hw_cal_data *caldata = NULL; enum htc_phymode mode; __be16 htc_mode; u8 cmd_rsp; @@ -130,16 +195,14 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv) mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); - if (priv->op_flags & OP_ASSOCIATED) - cancel_delayed_work_sync(&priv->ath9k_ani_work); - + ath9k_htc_stop_ani(priv); ieee80211_stop_queues(priv->hw); htc_stop(priv->htc); WMI_CMD(WMI_DISABLE_INTR_CMDID); WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID); WMI_CMD(WMI_STOP_RECV_CMDID); - caldata = &priv->caldata[channel->hw_value]; + caldata = &priv->caldata; ret = ath9k_hw_reset(ah, ah->curchan, caldata, false); if (ret) { ath_err(common, @@ -147,7 +210,8 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv) channel->center_freq, ret); } - ath_update_txpow(priv); + ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit, + &priv->curtxpow); WMI_CMD(WMI_START_RECV_CMDID); ath9k_host_rx_init(priv); @@ -158,12 +222,7 @@ void ath9k_htc_reset(struct ath9k_htc_priv *priv) WMI_CMD(WMI_ENABLE_INTR_CMDID); htc_start(priv->htc); - - if (priv->op_flags & OP_ASSOCIATED) { - ath9k_htc_beacon_config(priv, priv->vif); - ath_start_ani(priv); - } - + ath9k_htc_vif_reconfig(priv); ieee80211_wake_queues(priv->hw); ath9k_htc_ps_restore(priv); @@ -179,7 +238,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, struct ieee80211_conf *conf = &common->hw->conf; bool fastcc; struct ieee80211_channel *channel = hw->conf.channel; - struct ath9k_hw_cal_data *caldata; + struct ath9k_hw_cal_data *caldata = NULL; enum htc_phymode mode; __be16 htc_mode; u8 cmd_rsp; @@ -202,7 +261,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf), fastcc); - caldata = &priv->caldata[channel->hw_value]; + if (!fastcc) + caldata = &priv->caldata; ret = ath9k_hw_reset(ah, hchan, caldata, fastcc); if (ret) { ath_err(common, @@ -211,7 +271,8 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv 
*priv, goto err; } - ath_update_txpow(priv); + ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit, + &priv->curtxpow); WMI_CMD(WMI_START_RECV_CMDID); if (ret) @@ -230,11 +291,23 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, goto err; htc_start(priv->htc); + + if (!(priv->op_flags & OP_SCANNING) && + !(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) + ath9k_htc_vif_reconfig(priv); + err: ath9k_htc_ps_restore(priv); return ret; } +/* + * Monitor mode handling is a tad complicated because the firmware requires + * an interface to be created exclusively, while mac80211 doesn't associate + * an interface with the mode. + * + * So, for now, only one monitor interface can be configured. + */ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); @@ -244,9 +317,10 @@ static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); - hvif.index = 0; /* Should do for now */ + hvif.index = priv->mon_vif_idx; WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); priv->nvifs--; + priv->vif_slot &= ~(1 << priv->mon_vif_idx); } static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) @@ -254,70 +328,87 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) struct ath_common *common = ath9k_hw_common(priv->ah); struct ath9k_htc_target_vif hvif; struct ath9k_htc_target_sta tsta; - int ret = 0; + int ret = 0, sta_idx; u8 cmd_rsp; - if (priv->nvifs > 0) - return -ENOBUFS; + if ((priv->nvifs >= ATH9K_HTC_MAX_VIF) || + (priv->nstations >= ATH9K_HTC_MAX_STA)) { + ret = -ENOBUFS; + goto err_vif; + } - if (priv->nstations >= ATH9K_HTC_MAX_STA) - return -ENOBUFS; + sta_idx = ffz(priv->sta_slot); + if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) { + ret = -ENOBUFS; + goto err_vif; + } /* * Add an interface. */ - memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); hvif.opmode = cpu_to_be32(HTC_M_MONITOR); - priv->ah->opmode = NL80211_IFTYPE_MONITOR; - hvif.index = priv->nvifs; + hvif.index = ffz(priv->vif_slot); WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); if (ret) - return ret; + goto err_vif; + + /* + * Assign the monitor interface index as a special case here. + * This is needed when the interface is brought down. + */ + priv->mon_vif_idx = hvif.index; + priv->vif_slot |= (1 << hvif.index); + + /* + * Set the hardware mode to monitor only if there are no + * other interfaces. + */ + if (!priv->nvifs) + priv->ah->opmode = NL80211_IFTYPE_MONITOR; priv->nvifs++; /* * Associate a station with the interface for packet injection. */ - memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta)); memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN); tsta.is_vif_sta = 1; - tsta.sta_index = priv->nstations; + tsta.sta_index = sta_idx; tsta.vif_index = hvif.index; tsta.maxampdu = 0xffff; WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); if (ret) { ath_err(common, "Unable to add station entry for monitor mode\n"); - goto err_vif; + goto err_sta; } + priv->sta_slot |= (1 << sta_idx); priv->nstations++; - - /* - * Set chainmask etc. on the target. 
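The BSSID mask computed by ath9k_htc_set_bssid_mask() in the hunk above clears every mask bit in which any active interface address differs from the hardware MAC, so a single address filter can match all interfaces at once. A self-contained sketch of that derivation (the function name is illustrative only):

static void derive_bssid_mask(const u8 *hw_mac,
			      const u8 (*addrs)[ETH_ALEN], int n_addrs,
			      u8 *mask)
{
	int i, j;

	memset(mask, 0xff, ETH_ALEN);

	for (i = 0; i < n_addrs; i++)
		for (j = 0; j < ETH_ALEN; j++)
			mask[j] &= ~(hw_mac[j] ^ addrs[i][j]);
}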
- */ - ret = ath9k_htc_update_cap_target(priv); - if (ret) - ath_dbg(common, ATH_DBG_CONFIG, - "Failed to update capability in target\n"); - + priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx; priv->ah->is_monitoring = true; + ath_dbg(common, ATH_DBG_CONFIG, + "Attached a monitor interface at idx: %d, sta idx: %d\n", + priv->mon_vif_idx, sta_idx); + return 0; -err_vif: +err_sta: /* * Remove the interface from the target. */ __ath9k_htc_remove_monitor_interface(priv); +err_vif: + ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n"); + return ret; } @@ -329,7 +420,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) __ath9k_htc_remove_monitor_interface(priv); - sta_idx = 0; /* Only single interface, for now */ + sta_idx = priv->vif_sta_pos[priv->mon_vif_idx]; WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx); if (ret) { @@ -337,9 +428,14 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) return ret; } + priv->sta_slot &= ~(1 << sta_idx); priv->nstations--; priv->ah->is_monitoring = false; + ath_dbg(common, ATH_DBG_CONFIG, + "Removed a monitor interface at idx: %d, sta idx: %d\n", + priv->mon_vif_idx, sta_idx); + return 0; } @@ -351,12 +447,16 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, struct ath9k_htc_target_sta tsta; struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; struct ath9k_htc_sta *ista; - int ret; + int ret, sta_idx; u8 cmd_rsp; if (priv->nstations >= ATH9K_HTC_MAX_STA) return -ENOBUFS; + sta_idx = ffz(priv->sta_slot); + if ((sta_idx < 0) || (sta_idx > ATH9K_HTC_MAX_STA)) + return -ENOBUFS; + memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta)); if (sta) { @@ -366,13 +466,13 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, tsta.associd = common->curaid; tsta.is_vif_sta = 0; tsta.valid = true; - ista->index = priv->nstations; + ista->index = sta_idx; } else { memcpy(&tsta.macaddr, vif->addr, ETH_ALEN); tsta.is_vif_sta = 1; } - tsta.sta_index = priv->nstations; + tsta.sta_index = sta_idx; tsta.vif_index = avp->index; tsta.maxampdu = 0xffff; if (sta && sta->ht_cap.ht_supported) @@ -387,12 +487,21 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, return ret; } - if (sta) + if (sta) { ath_dbg(common, ATH_DBG_CONFIG, "Added a station entry for: %pM (idx: %d)\n", sta->addr, tsta.sta_index); + } else { + ath_dbg(common, ATH_DBG_CONFIG, + "Added a station entry for VIF %d (idx: %d)\n", + avp->index, tsta.sta_index); + } + priv->sta_slot |= (1 << sta_idx); priv->nstations++; + if (!sta) + priv->vif_sta_pos[avp->index] = sta_idx; + return 0; } @@ -401,6 +510,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv, struct ieee80211_sta *sta) { struct ath_common *common = ath9k_hw_common(priv->ah); + struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv; struct ath9k_htc_sta *ista; int ret; u8 cmd_rsp, sta_idx; @@ -409,7 +519,7 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv, ista = (struct ath9k_htc_sta *) sta->drv_priv; sta_idx = ista->index; } else { - sta_idx = 0; + sta_idx = priv->vif_sta_pos[avp->index]; } WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx); @@ -421,12 +531,19 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv, return ret; } - if (sta) + if (sta) { ath_dbg(common, ATH_DBG_CONFIG, "Removed a station entry for: %pM (idx: %d)\n", sta->addr, sta_idx); + } else { + ath_dbg(common, ATH_DBG_CONFIG, + "Removed a station entry for VIF %d (idx: %d)\n", + avp->index, sta_idx); + } + 
priv->sta_slot &= ~(1 << sta_idx); priv->nstations--; + return 0; } @@ -808,7 +925,7 @@ void ath9k_htc_debug_remove_root(void) /* ANI */ /*******/ -void ath_start_ani(struct ath9k_htc_priv *priv) +void ath9k_htc_start_ani(struct ath9k_htc_priv *priv) { struct ath_common *common = ath9k_hw_common(priv->ah); unsigned long timestamp = jiffies_to_msecs(jiffies); @@ -817,15 +934,22 @@ void ath_start_ani(struct ath9k_htc_priv *priv) common->ani.shortcal_timer = timestamp; common->ani.checkani_timer = timestamp; - ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work, + priv->op_flags |= OP_ANI_RUNNING; + + ieee80211_queue_delayed_work(common->hw, &priv->ani_work, msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); } -void ath9k_ani_work(struct work_struct *work) +void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv) +{ + cancel_delayed_work_sync(&priv->ani_work); + priv->op_flags &= ~OP_ANI_RUNNING; +} + +void ath9k_htc_ani_work(struct work_struct *work) { struct ath9k_htc_priv *priv = - container_of(work, struct ath9k_htc_priv, - ath9k_ani_work.work); + container_of(work, struct ath9k_htc_priv, ani_work.work); struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); bool longcal = false; @@ -834,7 +958,8 @@ void ath9k_ani_work(struct work_struct *work) unsigned int timestamp = jiffies_to_msecs(jiffies); u32 cal_interval, short_cal_interval; - short_cal_interval = ATH_STA_SHORT_CALINTERVAL; + short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ? + ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL; /* Only calibrate if awake */ if (ah->power_mode != ATH9K_PM_AWAKE) @@ -903,7 +1028,7 @@ set_timer: if (!common->ani.caldone) cal_interval = min(cal_interval, (u32)short_cal_interval); - ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work, + ieee80211_queue_delayed_work(common->hw, &priv->ani_work, msecs_to_jiffies(cal_interval)); } @@ -911,7 +1036,7 @@ set_timer: /* mac80211 Callbacks */ /**********************/ -static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ieee80211_hdr *hdr; struct ath9k_htc_priv *priv = hw->priv; @@ -924,7 +1049,7 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) padsize = padpos & 3; if (padsize && skb->len > padpos) { if (skb_headroom(skb) < padsize) - return -1; + goto fail_tx; skb_push(skb, padsize); memmove(skb->data, skb->data + padsize, padpos); } @@ -945,11 +1070,10 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb) goto fail_tx; } - return 0; + return; fail_tx: dev_kfree_skb_any(skb); - return 0; } static int ath9k_htc_start(struct ieee80211_hw *hw) @@ -987,7 +1111,8 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) return ret; } - ath_update_txpow(priv); + ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit, + &priv->curtxpow); mode = ath9k_htc_get_curmode(priv, init_channel); htc_mode = cpu_to_be16(mode); @@ -997,6 +1122,11 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) ath9k_host_rx_init(priv); + ret = ath9k_htc_update_cap_target(priv); + if (ret) + ath_dbg(common, ATH_DBG_CONFIG, + "Failed to update capability in target\n"); + priv->op_flags &= ~OP_INVALID; htc_start(priv->htc); @@ -1051,25 +1181,21 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw) cancel_work_sync(&priv->fatal_work); cancel_work_sync(&priv->ps_work); cancel_delayed_work_sync(&priv->ath9k_led_blink_work); + ath9k_htc_stop_ani(priv); ath9k_led_stop_brightness(priv); mutex_lock(&priv->mutex); - 
/* Remove monitor interface here */ - if (ah->opmode == NL80211_IFTYPE_MONITOR) { - if (ath9k_htc_remove_monitor_interface(priv)) - ath_err(common, "Unable to remove monitor interface\n"); - else - ath_dbg(common, ATH_DBG_CONFIG, - "Monitor interface removed\n"); - } - if (ah->btcoex_hw.enabled) { ath9k_hw_btcoex_disable(ah); if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) ath_htc_cancel_btcoex_work(priv); } + /* Remove a monitor interface if it's present. */ + if (priv->ah->is_monitoring) + ath9k_htc_remove_monitor_interface(priv); + ath9k_hw_phy_disable(ah); ath9k_hw_disable(ah); ath9k_htc_ps_restore(priv); @@ -1093,10 +1219,24 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, mutex_lock(&priv->mutex); - /* Only one interface for now */ - if (priv->nvifs > 0) { - ret = -ENOBUFS; - goto out; + if (priv->nvifs >= ATH9K_HTC_MAX_VIF) { + mutex_unlock(&priv->mutex); + return -ENOBUFS; + } + + if (priv->num_ibss_vif || + (priv->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) { + ath_err(common, "IBSS coexistence with other modes is not allowed\n"); + mutex_unlock(&priv->mutex); + return -ENOBUFS; + } + + if (((vif->type == NL80211_IFTYPE_AP) || + (vif->type == NL80211_IFTYPE_ADHOC)) && + ((priv->num_ap_vif + priv->num_ibss_vif) >= ATH9K_HTC_MAX_BCN_VIF)) { + ath_err(common, "Max. number of beaconing interfaces reached\n"); + mutex_unlock(&priv->mutex); + return -ENOBUFS; } ath9k_htc_ps_wakeup(priv); @@ -1110,6 +1250,9 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, case NL80211_IFTYPE_ADHOC: hvif.opmode = cpu_to_be32(HTC_M_IBSS); break; + case NL80211_IFTYPE_AP: + hvif.opmode = cpu_to_be32(HTC_M_HOSTAP); + break; default: ath_err(common, "Interface type %d not yet supported\n", vif->type); @@ -1117,34 +1260,39 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, goto out; } - ath_dbg(common, ATH_DBG_CONFIG, - "Attach a VIF of type: %d\n", vif->type); - - priv->ah->opmode = vif->type; - /* Index starts from zero on the target */ - avp->index = hvif.index = priv->nvifs; + avp->index = hvif.index = ffz(priv->vif_slot); hvif.rtsthreshold = cpu_to_be16(2304); WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif); if (ret) goto out; - priv->nvifs++; - /* * We need a node in target to tx mgmt frames * before association. 
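Interface and station indices are now handed out from the vif_slot/sta_slot bitmasks with ffz() instead of the old single-interface counters. A minimal sketch of that allocation scheme (helper names are made up for illustration):

static int alloc_slot(u8 *slot_mask, int max_slots)
{
	int idx = ffz(*slot_mask);	/* lowest clear bit = first free index */

	if (idx >= max_slots)
		return -ENOBUFS;

	*slot_mask |= BIT(idx);
	return idx;
}

static void free_slot(u8 *slot_mask, int idx)
{
	*slot_mask &= ~BIT(idx);
}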
*/ ret = ath9k_htc_add_station(priv, vif, NULL); - if (ret) + if (ret) { + WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); goto out; + } - ret = ath9k_htc_update_cap_target(priv); - if (ret) - ath_dbg(common, ATH_DBG_CONFIG, - "Failed to update capability in target\n"); + ath9k_htc_set_bssid_mask(priv, vif); + priv->vif_slot |= (1 << avp->index); + priv->nvifs++; priv->vif = vif; + + INC_VIF(priv, vif->type); + ath9k_htc_set_opmode(priv); + + if ((priv->ah->opmode == NL80211_IFTYPE_AP) && + !(priv->op_flags & OP_ANI_RUNNING)) + ath9k_htc_start_ani(priv); + + ath_dbg(common, ATH_DBG_CONFIG, + "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index); + out: ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); @@ -1162,8 +1310,6 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, int ret = 0; u8 cmd_rsp; - ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); - mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); @@ -1172,10 +1318,27 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, hvif.index = avp->index; WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif); priv->nvifs--; + priv->vif_slot &= ~(1 << avp->index); ath9k_htc_remove_station(priv, vif, NULL); priv->vif = NULL; + DEC_VIF(priv, vif->type); + ath9k_htc_set_opmode(priv); + + /* + * Stop ANI only if there are no associated station interfaces. + */ + if ((vif->type == NL80211_IFTYPE_AP) && (priv->num_ap_vif == 0)) { + priv->rearm_ani = false; + ieee80211_iterate_active_interfaces_atomic(priv->hw, + ath9k_htc_vif_iter, priv); + if (!priv->rearm_ani) + ath9k_htc_stop_ani(priv); + } + + ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index); + ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } @@ -1211,13 +1374,11 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) * IEEE80211_CONF_CHANGE_CHANNEL is handled. */ if (changed & IEEE80211_CONF_CHANGE_MONITOR) { - if (conf->flags & IEEE80211_CONF_MONITOR) { - if (ath9k_htc_add_monitor_interface(priv)) - ath_err(common, "Failed to set monitor mode\n"); - else - ath_dbg(common, ATH_DBG_CONFIG, - "HW opmode set to Monitor mode\n"); - } + if ((conf->flags & IEEE80211_CONF_MONITOR) && + !priv->ah->is_monitoring) + ath9k_htc_add_monitor_interface(priv); + else if (priv->ah->is_monitoring) + ath9k_htc_remove_monitor_interface(priv); } if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { @@ -1252,7 +1413,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_POWER) { priv->txpowlimit = 2 * conf->power_level; - ath_update_txpow(priv); + ath9k_cmn_update_txpow(priv->ah, priv->curtxpow, + priv->txpowlimit, &priv->curtxpow); } if (changed & IEEE80211_CONF_CHANGE_IDLE) { @@ -1439,66 +1601,81 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, struct ath9k_htc_priv *priv = hw->priv; struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); + bool set_assoc; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); + /* + * Set the HW AID/BSSID only for the first station interface + * or in IBSS mode. + */ + set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) || + ((priv->ah->opmode == NL80211_IFTYPE_STATION) && + (priv->num_sta_vif == 1))); + + if (changed & BSS_CHANGED_ASSOC) { - common->curaid = bss_conf->assoc ? 
- bss_conf->aid : 0; - ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", - bss_conf->assoc); - - if (bss_conf->assoc) { - priv->op_flags |= OP_ASSOCIATED; - ath_start_ani(priv); - } else { - priv->op_flags &= ~OP_ASSOCIATED; - cancel_delayed_work_sync(&priv->ath9k_ani_work); + if (set_assoc) { + ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", + bss_conf->assoc); + + common->curaid = bss_conf->assoc ? + bss_conf->aid : 0; + + if (bss_conf->assoc) + ath9k_htc_start_ani(priv); + else + ath9k_htc_stop_ani(priv); } } if (changed & BSS_CHANGED_BSSID) { - /* Set BSSID */ - memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); - ath9k_hw_write_associd(ah); + if (set_assoc) { + memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); + ath9k_hw_write_associd(ah); - ath_dbg(common, ATH_DBG_CONFIG, - "BSSID: %pM aid: 0x%x\n", - common->curbssid, common->curaid); + ath_dbg(common, ATH_DBG_CONFIG, + "BSSID: %pM aid: 0x%x\n", + common->curbssid, common->curaid); + } } - if ((changed & BSS_CHANGED_BEACON_INT) || - (changed & BSS_CHANGED_BEACON) || - ((changed & BSS_CHANGED_BEACON_ENABLED) && - bss_conf->enable_beacon)) { + if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) { + ath_dbg(common, ATH_DBG_CONFIG, + "Beacon enabled for BSS: %pM\n", bss_conf->bssid); priv->op_flags |= OP_ENABLE_BEACON; ath9k_htc_beacon_config(priv, vif); } - if ((changed & BSS_CHANGED_BEACON_ENABLED) && - !bss_conf->enable_beacon) { - priv->op_flags &= ~OP_ENABLE_BEACON; - ath9k_htc_beacon_config(priv, vif); - } - - if (changed & BSS_CHANGED_ERP_PREAMBLE) { - ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", - bss_conf->use_short_preamble); - if (bss_conf->use_short_preamble) - priv->op_flags |= OP_PREAMBLE_SHORT; - else - priv->op_flags &= ~OP_PREAMBLE_SHORT; + if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) { + /* + * Disable SWBA interrupt only if there are no + * AP/IBSS interfaces. + */ + if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) { + ath_dbg(common, ATH_DBG_CONFIG, + "Beacon disabled for BSS: %pM\n", + bss_conf->bssid); + priv->op_flags &= ~OP_ENABLE_BEACON; + ath9k_htc_beacon_config(priv, vif); + } } - if (changed & BSS_CHANGED_ERP_CTS_PROT) { - ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", - bss_conf->use_cts_prot); - if (bss_conf->use_cts_prot && - hw->conf.channel->band != IEEE80211_BAND_5GHZ) - priv->op_flags |= OP_PROTECT_ENABLE; - else - priv->op_flags &= ~OP_PROTECT_ENABLE; + if (changed & BSS_CHANGED_BEACON_INT) { + /* + * Reset the HW TSF for the first AP interface. 
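The set_assoc flag computed at the top of ath9k_htc_bss_info_changed() gates all AID/BSSID updates: only IBSS mode or the first (and only) station interface may touch the hardware association state. Expressed as a standalone predicate (hypothetical helper, same condition as the patch):

static bool htc_may_set_assoc(struct ath9k_htc_priv *priv)
{
	return (priv->ah->opmode == NL80211_IFTYPE_ADHOC) ||
	       ((priv->ah->opmode == NL80211_IFTYPE_STATION) &&
		(priv->num_sta_vif == 1));
}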
+ */ + if ((priv->ah->opmode == NL80211_IFTYPE_AP) && + (priv->nvifs == 1) && + (priv->num_ap_vif == 1) && + (vif->type == NL80211_IFTYPE_AP)) { + priv->op_flags |= OP_TSF_RESET; + } + ath_dbg(common, ATH_DBG_CONFIG, + "Beacon interval changed for BSS: %pM\n", + bss_conf->bssid); + ath9k_htc_beacon_config(priv, vif); } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -1557,12 +1734,14 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, - u16 tid, u16 *ssn) + u16 tid, u16 *ssn, u8 buf_size) { struct ath9k_htc_priv *priv = hw->priv; struct ath9k_htc_sta *ista; int ret = 0; + mutex_lock(&priv->mutex); + switch (action) { case IEEE80211_AMPDU_RX_START: break; @@ -1587,6 +1766,8 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n"); } + mutex_unlock(&priv->mutex); + return ret; } @@ -1599,8 +1780,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw) priv->op_flags |= OP_SCANNING; spin_unlock_bh(&priv->beacon_lock); cancel_work_sync(&priv->ps_work); - if (priv->op_flags & OP_ASSOCIATED) - cancel_delayed_work_sync(&priv->ath9k_ani_work); + ath9k_htc_stop_ani(priv); mutex_unlock(&priv->mutex); } @@ -1609,14 +1789,11 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw) struct ath9k_htc_priv *priv = hw->priv; mutex_lock(&priv->mutex); - ath9k_htc_ps_wakeup(priv); spin_lock_bh(&priv->beacon_lock); priv->op_flags &= ~OP_SCANNING; spin_unlock_bh(&priv->beacon_lock); - if (priv->op_flags & OP_ASSOCIATED) { - ath9k_htc_beacon_config(priv, priv->vif); - ath_start_ani(priv); - } + ath9k_htc_ps_wakeup(priv); + ath9k_htc_vif_reconfig(priv); ath9k_htc_ps_restore(priv); mutex_unlock(&priv->mutex); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 7a5ffca21958..4a4f27ba96af 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -84,7 +84,9 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) struct ieee80211_hdr *hdr; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = tx_info->control.sta; + struct ieee80211_vif *vif = tx_info->control.vif; struct ath9k_htc_sta *ista; + struct ath9k_htc_vif *avp; struct ath9k_htc_tx_ctl tx_ctl; enum htc_endpoint_id epid; u16 qnum; @@ -95,18 +97,31 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) hdr = (struct ieee80211_hdr *) skb->data; fc = hdr->frame_control; - if (tx_info->control.vif && - (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv) - vif_idx = ((struct ath9k_htc_vif *) - tx_info->control.vif->drv_priv)->index; - else - vif_idx = priv->nvifs; + /* + * Find out on which interface this packet has to be + * sent out. + */ + if (vif) { + avp = (struct ath9k_htc_vif *) vif->drv_priv; + vif_idx = avp->index; + } else { + if (!priv->ah->is_monitoring) { + ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT, + "VIF is null, but no monitor interface !\n"); + return -EINVAL; + } + vif_idx = priv->mon_vif_idx; + } + + /* + * Find out which station this packet is destined for. 
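With per-VIF station nodes, ath9k_htc_tx_start() above resolves the target station index from either the mac80211 station entry or the interface's own node recorded in vif_sta_pos[]. As a sketch (the helper name is illustrative):

static u8 htc_frame_sta_idx(struct ath9k_htc_priv *priv,
			    struct ieee80211_sta *sta, u8 vif_idx)
{
	struct ath9k_htc_sta *ista;

	if (sta) {
		ista = (struct ath9k_htc_sta *) sta->drv_priv;
		return ista->index;
	}

	/* No station: use the node created for the interface itself. */
	return priv->vif_sta_pos[vif_idx];
}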
+ */ if (sta) { ista = (struct ath9k_htc_sta *) sta->drv_priv; sta_idx = ista->index; } else { - sta_idx = 0; + sta_idx = priv->vif_sta_pos[vif_idx]; } memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl)); @@ -141,7 +156,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) /* CTS-to-self */ if (!(flags & ATH9K_HTC_TX_RTSCTS) && - (priv->op_flags & OP_PROTECT_ENABLE)) + (vif && vif->bss_conf.use_cts_prot)) flags |= ATH9K_HTC_TX_CTSONLY; tx_hdr.flags = cpu_to_be32(flags); @@ -217,6 +232,7 @@ static bool ath9k_htc_check_tx_aggr(struct ath9k_htc_priv *priv, void ath9k_tx_tasklet(unsigned long data) { struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; + struct ieee80211_vif *vif; struct ieee80211_sta *sta; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *tx_info; @@ -228,12 +244,16 @@ void ath9k_tx_tasklet(unsigned long data) hdr = (struct ieee80211_hdr *) skb->data; fc = hdr->frame_control; tx_info = IEEE80211_SKB_CB(skb); + vif = tx_info->control.vif; memset(&tx_info->status, 0, sizeof(tx_info->status)); + if (!vif) + goto send_mac80211; + rcu_read_lock(); - sta = ieee80211_find_sta(priv->vif, hdr->addr1); + sta = ieee80211_find_sta(vif, hdr->addr1); if (!sta) { rcu_read_unlock(); ieee80211_tx_status(priv->hw, skb); @@ -263,6 +283,7 @@ void ath9k_tx_tasklet(unsigned long data) rcu_read_unlock(); + send_mac80211: /* Send status to mac80211 */ ieee80211_tx_status(priv->hw, skb); } @@ -386,7 +407,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv) */ if (((ah->opmode != NL80211_IFTYPE_AP) && (priv->rxfilter & FIF_PROMISC_IN_BSS)) || - (ah->opmode == NL80211_IFTYPE_MONITOR)) + ah->is_monitoring) rfilt |= ATH9K_RX_FILTER_PROM; if (priv->rxfilter & FIF_CONTROL) @@ -398,8 +419,13 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv) else rfilt |= ATH9K_RX_FILTER_BEACON; - if (conf_is_ht(&priv->hw->conf)) + if (conf_is_ht(&priv->hw->conf)) { rfilt |= ATH9K_RX_FILTER_COMP_BAR; + rfilt |= ATH9K_RX_FILTER_UNCOMP_BA_BAR; + } + + if (priv->rxfilter & FIF_PSPOLL) + rfilt |= ATH9K_RX_FILTER_PSPOLL; return rfilt; @@ -412,20 +438,12 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv) static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv) { struct ath_hw *ah = priv->ah; - struct ath_common *common = ath9k_hw_common(ah); - u32 rfilt, mfilt[2]; /* configure rx filter */ rfilt = ath9k_htc_calcrxfilter(priv); ath9k_hw_setrxfilter(ah, rfilt); - /* configure bssid mask */ - ath_hw_setbssidmask(common); - - /* configure operational mode */ - ath9k_hw_setopmode(ah); - /* calculate and install multicast filter */ mfilt[0] = mfilt[1] = ~0; ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); @@ -576,31 +594,29 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate, rxbuf->rxstatus.rs_flags); - if (priv->op_flags & OP_ASSOCIATED) { - if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD && - !rxbuf->rxstatus.rs_moreaggr) - ATH_RSSI_LPF(priv->rx.last_rssi, - rxbuf->rxstatus.rs_rssi); + if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD && + !rxbuf->rxstatus.rs_moreaggr) + ATH_RSSI_LPF(priv->rx.last_rssi, + rxbuf->rxstatus.rs_rssi); - last_rssi = priv->rx.last_rssi; + last_rssi = priv->rx.last_rssi; - if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) - rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi, - ATH_RSSI_EP_MULTIPLIER); + if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) + rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi, + ATH_RSSI_EP_MULTIPLIER); - if (rxbuf->rxstatus.rs_rssi < 0) - rxbuf->rxstatus.rs_rssi = 
0; + if (rxbuf->rxstatus.rs_rssi < 0) + rxbuf->rxstatus.rs_rssi = 0; - if (ieee80211_is_beacon(fc)) - priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi; - } + if (ieee80211_is_beacon(fc)) + priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi; rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp); rx_status->band = hw->conf.channel->band; rx_status->freq = hw->conf.channel->center_freq; rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR; rx_status->antenna = rxbuf->rxstatus.rs_antenna; - rx_status->flag |= RX_FLAG_TSFT; + rx_status->flag |= RX_FLAG_MACTIME_MPDU; return true; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 9f01e50d5cda..338b07502f1a 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -495,6 +495,17 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->hw_version.devid == AR5416_AR9100_DEVID) ah->hw_version.macVersion = AR_SREV_VERSION_9100; + ath9k_hw_read_revisions(ah); + + /* + * Read back AR_WA into a permanent copy and set bits 14 and 17. + * We need to do this to avoid RMW of this register. We cannot + * read the reg when chip is asleep. + */ + ah->WARegVal = REG_READ(ah, AR_WA); + ah->WARegVal |= (AR_WA_D3_L1_DISABLE | + AR_WA_ASPM_TIMER_BASED_DISABLE); + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { ath_err(common, "Couldn't reset chip\n"); return -EIO; @@ -563,14 +574,6 @@ static int __ath9k_hw_init(struct ath_hw *ah) ath9k_hw_init_mode_regs(ah); - /* - * Read back AR_WA into a permanent copy and set bits 14 and 17. - * We need to do this to avoid RMW of this register. We cannot - * read the reg when chip is asleep. - */ - ah->WARegVal = REG_READ(ah, AR_WA); - ah->WARegVal |= (AR_WA_D3_L1_DISABLE | - AR_WA_ASPM_TIMER_BASED_DISABLE); if (ah->is_pciexpress) ath9k_hw_configpcipowersave(ah, 0, 0); @@ -668,14 +671,51 @@ static void ath9k_hw_init_qos(struct ath_hw *ah) REGWRITE_BUFFER_FLUSH(ah); } +unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah) +{ + REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) & ~(PLL3_DO_MEAS_MASK))); + udelay(100); + REG_WRITE(ah, PLL3, (REG_READ(ah, PLL3) | PLL3_DO_MEAS_MASK)); + + while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) + udelay(100); + + return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3; +} +EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc); + +#define DPLL2_KD_VAL 0x3D +#define DPLL2_KI_VAL 0x06 +#define DPLL3_PHASE_SHIFT_VAL 0x1 + static void ath9k_hw_init_pll(struct ath_hw *ah, struct ath9k_channel *chan) { u32 pll; - if (AR_SREV_9485(ah)) + if (AR_SREV_9485(ah)) { + REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666); + REG_WRITE(ah, AR_CH0_DDR_DPLL2, 0x19e82f01); + + REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3, + AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL); + + REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c); + udelay(1000); + REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666); + REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, + AR_CH0_DPLL2_KD, DPLL2_KD_VAL); + REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, + AR_CH0_DPLL2_KI, DPLL2_KI_VAL); + + REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3, + AR_CH0_DPLL3_PHASE_SHIFT, DPLL3_PHASE_SHIFT_VAL); + REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x142c); + udelay(1000); + } + pll = ath9k_hw_compute_pll_control(ah, chan); REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); @@ -1060,7 +1100,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) REG_WRITE(ah, AR_RC, AR_RC_AHB); REG_WRITE(ah, AR_RTC_RESET, 0); - udelay(2); REGWRITE_BUFFER_FLUSH(ah); @@ -1082,8 +1121,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) return false; } - 
ath9k_hw_read_revisions(ah); - return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); } @@ -1348,8 +1385,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_spur_mitigate_freq(ah, chan); ah->eep_ops->set_board_values(ah, chan); - ath9k_hw_set_operating_mode(ah, ah->opmode); - ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr)); @@ -1367,6 +1402,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, REGWRITE_BUFFER_FLUSH(ah); + ath9k_hw_set_operating_mode(ah, ah->opmode); + r = ath9k_hw_rf_set_freq(ah, chan); if (r) return r; diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index ea9fde670646..6650fd48415c 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -70,6 +70,9 @@ #define REG_READ(_ah, _reg) \ ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) +#define REG_READ_MULTI(_ah, _addr, _val, _cnt) \ + ath9k_hw_common(_ah)->ops->multi_read((_ah), (_addr), (_val), (_cnt)) + #define ENABLE_REGWRITE_BUFFER(_ah) \ do { \ if (ath9k_hw_common(_ah)->ops->enable_write_buffer) \ @@ -92,9 +95,9 @@ #define REG_READ_FIELD(_a, _r, _f) \ (((REG_READ(_a, _r) & _f) >> _f##_S)) #define REG_SET_BIT(_a, _r, _f) \ - REG_WRITE(_a, _r, REG_READ(_a, _r) | _f) + REG_WRITE(_a, _r, REG_READ(_a, _r) | (_f)) #define REG_CLR_BIT(_a, _r, _f) \ - REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f) + REG_WRITE(_a, _r, REG_READ(_a, _r) & ~(_f)) #define DO_DELAY(x) do { \ if ((++(x) % 64) == 0) \ @@ -926,6 +929,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); void ath9k_hw_reset_tsf(struct ath_hw *ah); void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); void ath9k_hw_init_global_settings(struct ath_hw *ah); +unsigned long ar9003_get_pll_sqsum_dvc(struct ath_hw *ah); void ath9k_hw_set11nmac2040(struct ath_hw *ah); void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index a033d01bf8a0..79aec983279f 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -140,6 +140,21 @@ static struct ieee80211_rate ath9k_legacy_rates[] = { RATE(540, 0x0c, 0), }; +#ifdef CONFIG_MAC80211_LEDS +static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = { + { .throughput = 0 * 1024, .blink_time = 334 }, + { .throughput = 1 * 1024, .blink_time = 260 }, + { .throughput = 5 * 1024, .blink_time = 220 }, + { .throughput = 10 * 1024, .blink_time = 190 }, + { .throughput = 20 * 1024, .blink_time = 170 }, + { .throughput = 50 * 1024, .blink_time = 150 }, + { .throughput = 70 * 1024, .blink_time = 130 }, + { .throughput = 100 * 1024, .blink_time = 110 }, + { .throughput = 200 * 1024, .blink_time = 80 }, + { .throughput = 300 * 1024, .blink_time = 50 }, +}; +#endif + static void ath9k_deinit_softc(struct ath_softc *sc); /* @@ -250,8 +265,7 @@ static int ath9k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah); return ath_reg_notifier_apply(wiphy, request, reg); @@ -438,9 +452,10 @@ static int ath9k_init_queues(struct ath_softc *sc) sc->config.cabqReadytime = ATH_CABQ_READY_TIME; ath_cabq_update(sc); - for (i = 0; i < WME_NUM_AC; i++) + for (i 
= 0; i < WME_NUM_AC; i++) { sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i); - + sc->tx.txq_map[i]->mac80211_qnum = i; + } return 0; } @@ -512,10 +527,8 @@ static void ath9k_init_misc(struct ath_softc *sc) sc->beacon.slottime = ATH9K_SLOT_TIME_9; - for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { + for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) sc->beacon.bslot[i] = NULL; - sc->beacon.bslot_aphy[i] = NULL; - } if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT; @@ -533,6 +546,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid, if (!ah) return -ENOMEM; + ah->hw = sc->hw; ah->hw_version.devid = devid; ah->hw_version.subsysid = subsysid; sc->sc_ah = ah; @@ -550,10 +564,13 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid, common->btcoex_enabled = ath9k_btcoex_enable == 1; spin_lock_init(&common->cc_lock); - spin_lock_init(&sc->wiphy_lock); spin_lock_init(&sc->sc_serial_rw); spin_lock_init(&sc->sc_pm_lock); mutex_init(&sc->mutex); +#ifdef CONFIG_ATH9K_DEBUGFS + spin_lock_init(&sc->nodes_lock); + INIT_LIST_HEAD(&sc->nodes); +#endif tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, (unsigned long)sc); @@ -695,7 +712,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, const struct ath_bus_ops *bus_ops) { struct ieee80211_hw *hw = sc->hw; - struct ath_wiphy *aphy = hw->priv; struct ath_common *common; struct ath_hw *ah; int error = 0; @@ -730,6 +746,13 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, ath9k_init_txpower_limits(sc); +#ifdef CONFIG_MAC80211_LEDS + /* must be initialized before ieee80211_register_hw */ + sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw, + IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink, + ARRAY_SIZE(ath9k_tpt_blink)); +#endif + /* Register with mac80211 */ error = ieee80211_register_hw(hw); if (error) @@ -750,10 +773,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, INIT_WORK(&sc->hw_check_work, ath_hw_check); INIT_WORK(&sc->paprd_work, ath_paprd_calibrate); - INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); - INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); - sc->wiphy_scheduler_int = msecs_to_jiffies(500); - aphy->last_rssi = ATH_RSSI_DUMMY_MARKER; + sc->last_rssi = ATH_RSSI_DUMMY_MARKER; ath_init_leds(sc); ath_start_rfkill_poll(sc); @@ -805,7 +825,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc) void ath9k_deinit_device(struct ath_softc *sc) { struct ieee80211_hw *hw = sc->hw; - int i = 0; ath9k_ps_wakeup(sc); @@ -814,20 +833,10 @@ void ath9k_deinit_device(struct ath_softc *sc) ath9k_ps_restore(sc); - for (i = 0; i < sc->num_sec_wiphy; i++) { - struct ath_wiphy *aphy = sc->sec_wiphy[i]; - if (aphy == NULL) - continue; - sc->sec_wiphy[i] = NULL; - ieee80211_unregister_hw(aphy->hw); - ieee80211_free_hw(aphy->hw); - } - ieee80211_unregister_hw(hw); ath_rx_cleanup(sc); ath_tx_cleanup(sc); ath9k_deinit_softc(sc); - kfree(sc->sec_wiphy); } void ath_descdma_cleanup(struct ath_softc *sc, diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 2915b11edefb..562257ac52cf 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -143,84 +143,59 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) } EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel); -bool ath9k_hw_stoptxdma(struct ath_hw *ah, 
u32 q) +void ath9k_hw_abort_tx_dma(struct ath_hw *ah) { -#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */ -#define ATH9K_TIME_QUANTUM 100 /* usec */ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_capabilities *pCap = &ah->caps; - struct ath9k_tx_queue_info *qi; - u32 tsfLow, j, wait; - u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; + int i, q; - if (q >= pCap->total_queues) { - ath_dbg(common, ATH_DBG_QUEUE, - "Stopping TX DMA, invalid queue: %u\n", q); - return false; - } + REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M); - qi = &ah->txq[q]; - if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { - ath_dbg(common, ATH_DBG_QUEUE, - "Stopping TX DMA, inactive queue: %u\n", q); - return false; - } + REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF); + REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); + REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF); - REG_WRITE(ah, AR_Q_TXD, 1 << q); + for (q = 0; q < AR_NUM_QCU; q++) { + for (i = 0; i < 1000; i++) { + if (i) + udelay(5); - for (wait = wait_time; wait != 0; wait--) { - if (ath9k_hw_numtxpending(ah, q) == 0) - break; - udelay(ATH9K_TIME_QUANTUM); + if (!ath9k_hw_numtxpending(ah, q)) + break; + } } - if (ath9k_hw_numtxpending(ah, q)) { - ath_dbg(common, ATH_DBG_QUEUE, - "%s: Num of pending TX Frames %d on Q %d\n", - __func__, ath9k_hw_numtxpending(ah, q), q); - - for (j = 0; j < 2; j++) { - tsfLow = REG_READ(ah, AR_TSF_L32); - REG_WRITE(ah, AR_QUIET2, - SM(10, AR_QUIET2_QUIET_DUR)); - REG_WRITE(ah, AR_QUIET_PERIOD, 100); - REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10); - REG_SET_BIT(ah, AR_TIMER_MODE, - AR_QUIET_TIMER_EN); - - if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) - break; + REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF); + REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); + REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF); - ath_dbg(common, ATH_DBG_QUEUE, - "TSF has moved while trying to set quiet time TSF: 0x%08x\n", - tsfLow); - } + REG_WRITE(ah, AR_Q_TXD, 0); +} +EXPORT_SYMBOL(ath9k_hw_abort_tx_dma); - REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); +bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q) +{ +#define ATH9K_TX_STOP_DMA_TIMEOUT 1000 /* usec */ +#define ATH9K_TIME_QUANTUM 100 /* usec */ + int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; + int wait; - udelay(200); - REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN); + REG_WRITE(ah, AR_Q_TXD, 1 << q); - wait = wait_time; - while (ath9k_hw_numtxpending(ah, q)) { - if ((--wait) == 0) { - ath_err(common, - "Failed to stop TX DMA in 100 msec after killing last frame\n"); - break; - } + for (wait = wait_time; wait != 0; wait--) { + if (wait != wait_time) udelay(ATH9K_TIME_QUANTUM); - } - REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); + if (ath9k_hw_numtxpending(ah, q) == 0) + break; } REG_WRITE(ah, AR_Q_TXD, 0); + return wait != 0; #undef ATH9K_TX_STOP_DMA_TIMEOUT #undef ATH9K_TIME_QUANTUM } -EXPORT_SYMBOL(ath9k_hw_stoptxdma); +EXPORT_SYMBOL(ath9k_hw_stop_dma_queue); void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs) { @@ -690,17 +665,23 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY; if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { + /* + * Treat these errors as mutually exclusive to avoid spurious + * extra error reports from the hardware. 
If a CRC error is + * reported, then decryption and MIC errors are irrelevant, + * the frame is going to be dropped either way + */ if (ads.ds_rxstatus8 & AR_CRCErr) rs->rs_status |= ATH9K_RXERR_CRC; - if (ads.ds_rxstatus8 & AR_PHYErr) { + else if (ads.ds_rxstatus8 & AR_PHYErr) { rs->rs_status |= ATH9K_RXERR_PHY; phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); rs->rs_phyerr = phyerr; - } - if (ads.ds_rxstatus8 & AR_DecryptCRCErr) + } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) rs->rs_status |= ATH9K_RXERR_DECRYPT; - if (ads.ds_rxstatus8 & AR_MichaelErr) + else if (ads.ds_rxstatus8 & AR_MichaelErr) rs->rs_status |= ATH9K_RXERR_MIC; + if (ads.ds_rxstatus8 & AR_KeyMiss) rs->rs_status |= ATH9K_RXERR_DECRYPT; } diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 7512f97e8f49..b2b2ff852c32 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -639,6 +639,8 @@ enum ath9k_rx_filter { ATH9K_RX_FILTER_PHYERR = 0x00000100, ATH9K_RX_FILTER_MYBEACON = 0x00000200, ATH9K_RX_FILTER_COMP_BAR = 0x00000400, + ATH9K_RX_FILTER_COMP_BA = 0x00000800, + ATH9K_RX_FILTER_UNCOMP_BA_BAR = 0x00001000, ATH9K_RX_FILTER_PSPOLL = 0x00004000, ATH9K_RX_FILTER_PHYRADAR = 0x00002000, ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000, @@ -674,7 +676,8 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q); void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds); u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q); bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel); -bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q); +bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q); +void ath9k_hw_abort_tx_dma(struct ath_hw *ah); void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs); bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, const struct ath9k_tx_queue_info *qinfo); diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index a09d15f7aa6e..115f162c617a 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -15,20 +15,10 @@ */ #include <linux/nl80211.h> +#include <linux/delay.h> #include "ath9k.h" #include "btcoex.h" -static void ath_update_txpow(struct ath_softc *sc) -{ - struct ath_hw *ah = sc->sc_ah; - - if (sc->curtxpow != sc->config.txpowlimit) { - ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false); - /* read back in case value is clamped */ - sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; - } -} - static u8 parse_mpdudensity(u8 mpdudensity) { /* @@ -64,17 +54,19 @@ static u8 parse_mpdudensity(u8 mpdudensity) } } -static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc, - struct ieee80211_hw *hw) +static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq) { - struct ieee80211_channel *curchan = hw->conf.channel; - struct ath9k_channel *channel; - u8 chan_idx; + bool pending = false; + + spin_lock_bh(&txq->axq_lock); - chan_idx = curchan->hw_value; - channel = &sc->sc_ah->channels[chan_idx]; - ath9k_update_ichannel(sc, hw, channel); - return channel; + if (txq->axq_depth || !list_empty(&txq->axq_acq)) + pending = true; + else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) + pending = !list_empty(&txq->txq_fifo_pending); + + spin_unlock_bh(&txq->axq_lock); + return pending; } bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode) @@ -177,7 +169,12 @@ static void ath_update_survey_nf(struct ath_softc *sc, int channel) } } -static void ath_update_survey_stats(struct ath_softc *sc) +/* + 
* Updates the survey statistics and returns the busy time since last + * update in %, if the measurement duration was long enough for the + * result to be useful, -1 otherwise. + */ +static int ath_update_survey_stats(struct ath_softc *sc) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); @@ -185,9 +182,10 @@ static void ath_update_survey_stats(struct ath_softc *sc) struct survey_info *survey = &sc->survey[pos]; struct ath_cycle_counters *cc = &common->cc_survey; unsigned int div = common->clockrate * 1000; + int ret = 0; if (!ah->curchan) - return; + return -1; if (ah->power_mode == ATH9K_PM_AWAKE) ath_hw_cycle_counters_update(common); @@ -202,9 +200,18 @@ static void ath_update_survey_stats(struct ath_softc *sc) survey->channel_time_rx += cc->rx_frame / div; survey->channel_time_tx += cc->tx_frame / div; } + + if (cc->cycles < div) + return -1; + + if (cc->cycles > 0) + ret = cc->rx_busy * 100 / cc->cycles; + memset(cc, 0, sizeof(*cc)); ath_update_survey_nf(sc, pos); + + return ret; } /* @@ -215,7 +222,6 @@ static void ath_update_survey_stats(struct ath_softc *sc) int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, struct ath9k_channel *hchan) { - struct ath_wiphy *aphy = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &common->hw->conf; @@ -227,10 +233,13 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, if (sc->sc_flags & SC_OP_INVALID) return -EIO; + sc->hw_busy_count = 0; + del_timer_sync(&common->ani.timer); cancel_work_sync(&sc->paprd_work); cancel_work_sync(&sc->hw_check_work); cancel_delayed_work_sync(&sc->tx_complete_work); + cancel_delayed_work_sync(&sc->hw_pll_work); ath9k_ps_wakeup(sc); @@ -251,6 +260,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, if (!ath_stoprecv(sc)) stopped = false; + if (!ath9k_hw_check_alive(ah)) + stopped = false; + /* XXX: do not flush receive queue here. We don't want * to flush data frames already in queue because of * changing channel. 
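For reference, the busy-time arithmetic that ath_update_survey_stats() now returns can be modelled in a few lines of plain C. The structure, function and standalone main() below are illustrative stand-ins, not driver code:

#include <stdio.h>

/*
 * Illustrative user-space model of the calculation added to
 * ath_update_survey_stats(): return channel busy time in percent, or -1
 * when fewer cycles than one millisecond's worth have accumulated.
 */
struct cycle_counters {
	unsigned int cycles;   /* clock cycles since the last update */
	unsigned int rx_busy;  /* cycles the medium was sensed busy  */
};

static int busy_percent(struct cycle_counters *cc, unsigned int clockrate_mhz)
{
	unsigned int div = clockrate_mhz * 1000; /* cycles per millisecond */
	int ret = 0;

	if (cc->cycles < div)
		return -1;		/* sample too short to be useful */

	if (cc->cycles > 0)
		ret = cc->rx_busy * 100 / cc->cycles;

	cc->cycles = cc->rx_busy = 0;	/* counters are consumed on read */
	return ret;
}

int main(void)
{
	struct cycle_counters cc = { .cycles = 440000, .rx_busy = 396000 };

	printf("busy = %d%%\n", busy_percent(&cc, 44)); /* prints 90% */
	return 0;
}
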
*/ @@ -259,7 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, fastcc = false; if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) - caldata = &aphy->caldata; + caldata = &sc->caldata; ath_dbg(common, ATH_DBG_CONFIG, "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n", @@ -281,17 +293,21 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, goto ps_restore; } - ath_update_txpow(sc); + ath9k_cmn_update_txpow(ah, sc->curtxpow, + sc->config.txpowlimit, &sc->curtxpow); ath9k_hw_set_interrupts(ah, ah->imask); if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) { if (sc->sc_flags & SC_OP_BEACONS) ath_beacon_config(sc, NULL); ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); + ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2); ath_start_ani(common); } ps_restore: + ieee80211_wake_queues(hw); + spin_unlock_bh(&sc->sc_pcu_lock); ath9k_ps_restore(sc); @@ -549,6 +565,12 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) struct ath_hw *ah = sc->sc_ah; an = (struct ath_node *)sta->drv_priv; +#ifdef CONFIG_ATH9K_DEBUGFS + spin_lock(&sc->nodes_lock); + list_add(&an->list, &sc->nodes); + spin_unlock(&sc->nodes_lock); + an->sta = sta; +#endif if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM) sc->sc_flags |= SC_OP_ENABLE_APM; @@ -564,6 +586,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) { struct ath_node *an = (struct ath_node *)sta->drv_priv; +#ifdef CONFIG_ATH9K_DEBUGFS + spin_lock(&sc->nodes_lock); + list_del(&an->list); + spin_unlock(&sc->nodes_lock); + an->sta = NULL; +#endif + if (sc->sc_flags & SC_OP_TXAGGR) ath_tx_node_cleanup(sc, an); } @@ -571,17 +600,25 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta) void ath_hw_check(struct work_struct *work) { struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work); - int i; + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + unsigned long flags; + int busy; ath9k_ps_wakeup(sc); + if (ath9k_hw_check_alive(sc->sc_ah)) + goto out; - for (i = 0; i < 3; i++) { - if (ath9k_hw_check_alive(sc->sc_ah)) - goto out; + spin_lock_irqsave(&common->cc_lock, flags); + busy = ath_update_survey_stats(sc); + spin_unlock_irqrestore(&common->cc_lock, flags); - msleep(1); - } - ath_reset(sc, true); + ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, " + "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1); + if (busy >= 99) { + if (++sc->hw_busy_count >= 3) + ath_reset(sc, true); + } else if (busy >= 0) + sc->hw_busy_count = 0; out: ath9k_ps_restore(sc); @@ -604,7 +641,15 @@ void ath9k_tasklet(unsigned long data) ath9k_ps_wakeup(sc); spin_lock(&sc->sc_pcu_lock); - if (!ath9k_hw_check_alive(ah)) + /* + * Only run the baseband hang check if beacons stop working in AP or + * IBSS mode, because it has a high false positive rate. 
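The reworked ath_hw_check() above replaces the fixed three-iteration poll with a busy-based heuristic: a reset is requested only after three consecutive readings of 99% or more channel busy time. A minimal user-space sketch of just that counting policy follows; hw_check() and the sample values are invented for illustration, and the real routine only runs after ath9k_hw_check_alive() has already failed:

#include <stdio.h>
#include <stdbool.h>

/*
 * Sketch of the hang-detection policy in ath_hw_check(): a reading of -1
 * (sample too short) leaves the counter alone, a reading below 99% clears
 * it, and the third consecutive reading of 99% or more requests a reset.
 * The return value stands in for calling ath_reset().
 */
static int hw_busy_count;

static bool hw_check(int busy)
{
	bool chip_reset = false;

	if (busy >= 99) {
		if (++hw_busy_count >= 3)
			chip_reset = true;
	} else if (busy >= 0) {
		hw_busy_count = 0;
	}
	return chip_reset;
}

int main(void)
{
	int samples[] = { 45, 99, -1, 100, 99 }; /* third 99%+ reading resets */
	int i;

	for (i = 0; i < 5; i++)
		printf("busy=%3d -> reset=%d\n", samples[i], hw_check(samples[i]));
	return 0;
}
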
For station + * mode it should not be necessary, since the upper layers will detect + * this through a beacon miss automatically and the following channel + * change will trigger a hardware reset anyway + */ + if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 && + !ath9k_hw_check_alive(ah)) ieee80211_queue_work(sc->hw, &sc->hw_check_work); if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) @@ -783,54 +828,11 @@ chip_reset: #undef SCHED_INTR } -static u32 ath_get_extchanmode(struct ath_softc *sc, - struct ieee80211_channel *chan, - enum nl80211_channel_type channel_type) -{ - u32 chanmode = 0; - - switch (chan->band) { - case IEEE80211_BAND_2GHZ: - switch(channel_type) { - case NL80211_CHAN_NO_HT: - case NL80211_CHAN_HT20: - chanmode = CHANNEL_G_HT20; - break; - case NL80211_CHAN_HT40PLUS: - chanmode = CHANNEL_G_HT40PLUS; - break; - case NL80211_CHAN_HT40MINUS: - chanmode = CHANNEL_G_HT40MINUS; - break; - } - break; - case IEEE80211_BAND_5GHZ: - switch(channel_type) { - case NL80211_CHAN_NO_HT: - case NL80211_CHAN_HT20: - chanmode = CHANNEL_A_HT20; - break; - case NL80211_CHAN_HT40PLUS: - chanmode = CHANNEL_A_HT40PLUS; - break; - case NL80211_CHAN_HT40MINUS: - chanmode = CHANNEL_A_HT40MINUS; - break; - } - break; - default: - break; - } - - return chanmode; -} - static void ath9k_bss_assoc_info(struct ath_softc *sc, struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf) { - struct ath_wiphy *aphy = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); @@ -854,7 +856,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc, ath_beacon_config(sc, vif); /* Reset rssi stats */ - aphy->last_rssi = ATH_RSSI_DUMMY_MARKER; + sc->last_rssi = ATH_RSSI_DUMMY_MARKER; sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; sc->sc_flags |= SC_OP_ANI_RUN; @@ -881,7 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) ath9k_hw_configpcipowersave(ah, 0, 0); if (!ah->curchan) - ah->curchan = ath_get_curchannel(sc, sc->hw); + ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah); r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); if (r) { @@ -890,7 +892,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) channel->center_freq, r); } - ath_update_txpow(sc); + ath9k_cmn_update_txpow(ah, sc->curtxpow, + sc->config.txpowlimit, &sc->curtxpow); if (ath_startrecv(sc) != 0) { ath_err(common, "Unable to restart recv logic\n"); goto out; @@ -907,6 +910,8 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) ath9k_hw_set_gpio(ah, ah->led_pin, 0); ieee80211_wake_queues(hw); + ieee80211_queue_delayed_work(hw, &sc->hw_pll_work, HZ/2); + out: spin_unlock_bh(&sc->sc_pcu_lock); @@ -920,6 +925,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) int r; ath9k_ps_wakeup(sc); + cancel_delayed_work_sync(&sc->hw_pll_work); + spin_lock_bh(&sc->sc_pcu_lock); ieee80211_stop_queues(hw); @@ -942,7 +949,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) ath_flushrecv(sc); /* flush recv queue */ if (!ah->curchan) - ah->curchan = ath_get_curchannel(sc, hw); + ah->curchan = ath9k_cmn_get_curchannel(hw, ah); r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); if (r) { @@ -966,6 +973,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) struct ieee80211_hw *hw = sc->hw; int r; + sc->hw_busy_count = 0; + /* Stop ANI */ del_timer_sync(&common->ani.timer); @@ -993,7 +1002,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) * that changes the channel so update 
any state that * might change as a result. */ - ath_update_txpow(sc); + ath9k_cmn_update_txpow(ah, sc->curtxpow, + sc->config.txpowlimit, &sc->curtxpow); if ((sc->sc_flags & SC_OP_BEACONS) || !(sc->sc_flags & (SC_OP_OFFCHANNEL))) ath_beacon_config(sc, NULL); /* restart beacons */ @@ -1021,38 +1031,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) return r; } -/* XXX: Remove me once we don't depend on ath9k_channel for all - * this redundant data */ -void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, - struct ath9k_channel *ichan) -{ - struct ieee80211_channel *chan = hw->conf.channel; - struct ieee80211_conf *conf = &hw->conf; - - ichan->channel = chan->center_freq; - ichan->chan = chan; - - if (chan->band == IEEE80211_BAND_2GHZ) { - ichan->chanmode = CHANNEL_G; - ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G; - } else { - ichan->chanmode = CHANNEL_A; - ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; - } - - if (conf_is_ht(conf)) - ichan->chanmode = ath_get_extchanmode(sc, chan, - conf->channel_type); -} - /**********************/ /* mac80211 callbacks */ /**********************/ static int ath9k_start(struct ieee80211_hw *hw) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_channel *curchan = hw->conf.channel; @@ -1065,32 +1050,10 @@ static int ath9k_start(struct ieee80211_hw *hw) mutex_lock(&sc->mutex); - if (ath9k_wiphy_started(sc)) { - if (sc->chan_idx == curchan->hw_value) { - /* - * Already on the operational channel, the new wiphy - * can be marked active. - */ - aphy->state = ATH_WIPHY_ACTIVE; - ieee80211_wake_queues(hw); - } else { - /* - * Another wiphy is on another channel, start the new - * wiphy in paused state. - */ - aphy->state = ATH_WIPHY_PAUSED; - ieee80211_stop_queues(hw); - } - mutex_unlock(&sc->mutex); - return 0; - } - aphy->state = ATH_WIPHY_ACTIVE; - /* setup initial channel */ - sc->chan_idx = curchan->hw_value; - init_channel = ath_get_curchannel(sc, hw); + init_channel = ath9k_cmn_get_curchannel(hw, ah); /* Reset SERDES registers */ ath9k_hw_configpcipowersave(ah, 0, 0); @@ -1116,7 +1079,8 @@ static int ath9k_start(struct ieee80211_hw *hw) * This is needed only to setup initial state * but it's best done after a reset. 
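The ath9k_cmn_update_txpow() conversion repeated throughout these hunks boils down to "program the new limit, then read back whatever the regulatory layer clamped it to". A toy model of that read-back pattern follows; reg_max_halfdbm and the helper name are assumptions of this sketch, not the common-module API:

#include <stdio.h>

/*
 * Toy model of the "write limit, read back clamped value" pattern.
 * reg_max_halfdbm stands in for the regulatory ceiling enforced by the
 * hardware layer; tx power is tracked in 0.5 dBm units as in the driver.
 */
static unsigned int reg_max_halfdbm = 40;	/* 20 dBm */

static void update_txpow(unsigned int cur, unsigned int new,
			 unsigned int *txpower)
{
	if (cur != new)
		*txpower = new < reg_max_halfdbm ? new : reg_max_halfdbm;
	else
		*txpower = cur;
}

int main(void)
{
	unsigned int curtxpow = 0;

	update_txpow(curtxpow, 2 * 30, &curtxpow);	/* ask for 30 dBm */
	printf("limit after clamping: %u.%u dBm\n",
	       curtxpow / 2, (curtxpow & 1) ? 5u : 0u);	/* prints 20.0 dBm */
	return 0;
}
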
*/ - ath_update_txpow(sc); + ath9k_cmn_update_txpow(ah, sc->curtxpow, + sc->config.txpowlimit, &sc->curtxpow); /* * Setup the hardware after reset: @@ -1182,22 +1146,13 @@ mutex_unlock: return r; } -static int ath9k_tx(struct ieee80211_hw *hw, - struct sk_buff *skb) +static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_tx_control txctl; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; - if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { - ath_dbg(common, ATH_DBG_XMIT, - "ath9k: %s: TX in unexpected wiphy state %d\n", - wiphy_name(hw->wiphy), aphy->state); - goto exit; - } - if (sc->ps_enabled) { /* * mac80211 does not set PM field for normal data frames, so we @@ -1248,52 +1203,30 @@ static int ath9k_tx(struct ieee80211_hw *hw, goto exit; } - return 0; + return; exit: dev_kfree_skb_any(skb); - return 0; } static void ath9k_stop(struct ieee80211_hw *hw) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - int i; mutex_lock(&sc->mutex); - aphy->state = ATH_WIPHY_INACTIVE; - - if (led_blink) - cancel_delayed_work_sync(&sc->ath_led_blink_work); - cancel_delayed_work_sync(&sc->tx_complete_work); + cancel_delayed_work_sync(&sc->hw_pll_work); cancel_work_sync(&sc->paprd_work); cancel_work_sync(&sc->hw_check_work); - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i]) - break; - } - - if (i == sc->num_sec_wiphy) { - cancel_delayed_work_sync(&sc->wiphy_work); - cancel_work_sync(&sc->chan_work); - } - if (sc->sc_flags & SC_OP_INVALID) { ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); mutex_unlock(&sc->mutex); return; } - if (ath9k_wiphy_started(sc)) { - mutex_unlock(&sc->mutex); - return; /* another wiphy still in use */ - } - /* Ensure HW is awake when we try to shut it down. 
*/ ath9k_ps_wakeup(sc); @@ -1319,6 +1252,11 @@ static void ath9k_stop(struct ieee80211_hw *hw) } else sc->rx.rxlink = NULL; + if (sc->rx.frag) { + dev_kfree_skb_any(sc->rx.frag); + sc->rx.frag = NULL; + } + /* disable HAL and put h/w to sleep */ ath9k_hw_disable(ah); ath9k_hw_configpcipowersave(ah, 1, 1); @@ -1334,7 +1272,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) ath9k_ps_restore(sc); sc->ps_idle = true; - ath9k_set_wiphy_idle(aphy, true); ath_radio_disable(sc, hw); sc->sc_flags |= SC_OP_INVALID; @@ -1344,112 +1281,225 @@ static void ath9k_stop(struct ieee80211_hw *hw) ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); } -static int ath9k_add_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +bool ath9k_uses_beacons(int type) +{ + switch (type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + return true; + default: + return false; + } +} + +static void ath9k_reclaim_beacon(struct ath_softc *sc, + struct ieee80211_vif *vif) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; - enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; - int ret = 0; - mutex_lock(&sc->mutex); + ath9k_set_beaconing_status(sc, false); + ath_beacon_return(sc, avp); + ath9k_set_beaconing_status(sc, true); + sc->sc_flags &= ~SC_OP_BEACONS; +} + +static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath9k_vif_iter_data *iter_data = data; + int i; + + if (iter_data->hw_macaddr) + for (i = 0; i < ETH_ALEN; i++) + iter_data->mask[i] &= + ~(iter_data->hw_macaddr[i] ^ mac[i]); switch (vif->type) { - case NL80211_IFTYPE_STATION: - ic_opmode = NL80211_IFTYPE_STATION; + case NL80211_IFTYPE_AP: + iter_data->naps++; break; - case NL80211_IFTYPE_WDS: - ic_opmode = NL80211_IFTYPE_WDS; + case NL80211_IFTYPE_STATION: + iter_data->nstations++; break; case NL80211_IFTYPE_ADHOC: - case NL80211_IFTYPE_AP: + iter_data->nadhocs++; + break; case NL80211_IFTYPE_MESH_POINT: - if (sc->nbcnvifs >= ATH_BCBUF) { - ret = -ENOBUFS; - goto out; - } - ic_opmode = vif->type; + iter_data->nmeshes++; + break; + case NL80211_IFTYPE_WDS: + iter_data->nwds++; break; default: - ath_err(common, "Interface type %d not yet supported\n", - vif->type); - ret = -EOPNOTSUPP; - goto out; + iter_data->nothers++; + break; } +} - ath_dbg(common, ATH_DBG_CONFIG, - "Attach a VIF of type: %d\n", ic_opmode); +/* Called with sc->mutex held. */ +void ath9k_calculate_iter_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ath9k_vif_iter_data *iter_data) +{ + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); - /* Set the VIF opmode */ - avp->av_opmode = ic_opmode; - avp->av_bslot = -1; + /* + * Use the hardware MAC address as reference, the hardware uses it + * together with the BSSID mask when matching addresses. + */ + memset(iter_data, 0, sizeof(*iter_data)); + iter_data->hw_macaddr = common->macaddr; + memset(&iter_data->mask, 0xff, ETH_ALEN); - sc->nvifs++; + if (vif) + ath9k_vif_iter(iter_data, vif->addr, vif); + + /* Get list of all active MAC addresses */ + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter, + iter_data); +} - ath9k_set_bssid_mask(hw, vif); +/* Called with sc->mutex held. 
*/ +static void ath9k_calculate_summary_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_vif_iter_data iter_data; - if (sc->nvifs > 1) - goto out; /* skip global settings for secondary vif */ + ath9k_calculate_iter_data(hw, vif, &iter_data); + + ath9k_ps_wakeup(sc); + /* Set BSSID mask. */ + memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); + ath_hw_setbssidmask(common); - if (ic_opmode == NL80211_IFTYPE_AP) { + /* Set op-mode & TSF */ + if (iter_data.naps > 0) { ath9k_hw_set_tsfadjust(ah, 1); sc->sc_flags |= SC_OP_TSF_RESET; - } + ah->opmode = NL80211_IFTYPE_AP; + } else { + ath9k_hw_set_tsfadjust(ah, 0); + sc->sc_flags &= ~SC_OP_TSF_RESET; - /* Set the device opmode */ - ah->opmode = ic_opmode; + if (iter_data.nwds + iter_data.nmeshes) + ah->opmode = NL80211_IFTYPE_AP; + else if (iter_data.nadhocs) + ah->opmode = NL80211_IFTYPE_ADHOC; + else + ah->opmode = NL80211_IFTYPE_STATION; + } /* * Enable MIB interrupts when there are hardware phy counters. - * Note we only do this (at the moment) for station mode. */ - if ((vif->type == NL80211_IFTYPE_STATION) || - (vif->type == NL80211_IFTYPE_ADHOC) || - (vif->type == NL80211_IFTYPE_MESH_POINT)) { + if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) { if (ah->config.enable_ani) ah->imask |= ATH9K_INT_MIB; ah->imask |= ATH9K_INT_TSFOOR; + } else { + ah->imask &= ~ATH9K_INT_MIB; + ah->imask &= ~ATH9K_INT_TSFOOR; } ath9k_hw_set_interrupts(ah, ah->imask); + ath9k_ps_restore(sc); - if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC) { + /* Set up ANI */ + if ((iter_data.naps + iter_data.nadhocs) > 0) { sc->sc_flags |= SC_OP_ANI_RUN; ath_start_ani(common); + } else { + sc->sc_flags &= ~SC_OP_ANI_RUN; + del_timer_sync(&common->ani.timer); } +} -out: - mutex_unlock(&sc->mutex); - return ret; +/* Called with sc->mutex held, vif counts set up properly. */ +static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct ath_softc *sc = hw->priv; + + ath9k_calculate_summary_state(hw, vif); + + if (ath9k_uses_beacons(vif->type)) { + int error; + /* This may fail because upper levels do not have beacons + * properly configured yet. That's OK, we assume it + * will be properly configured and then we will be notified + * in the info_changed method and set up beacons properly + * there. 
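ath9k_calculate_iter_data() and ath9k_calculate_summary_state() above replace the old per-wiphy bookkeeping with one pass over the active interfaces: the BSSID mask keeps only the address bits shared by every interface MAC, and the hardware opmode falls out of the interface counts. A compact user-space model of both steps; the types, helper names and example addresses are invented for illustration:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Interface counts gathered while iterating, as in ath9k_vif_iter(). */
struct iter_data {
	unsigned char mask[ETH_ALEN];
	int naps, nstations, nadhocs, nmeshes, nwds;
};

/* Clear every mask bit where an interface MAC differs from the hw MAC. */
static void mask_add_addr(struct iter_data *d, const unsigned char *hw,
			  const unsigned char *mac)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		d->mask[i] &= ~(hw[i] ^ mac[i]);
}

/* Derive the operating mode from the interface counts. */
static const char *pick_opmode(const struct iter_data *d)
{
	if (d->naps || d->nwds + d->nmeshes)
		return "AP";
	if (d->nadhocs)
		return "ADHOC";
	return "STATION";
}

int main(void)
{
	const unsigned char hw[]  = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x33 };
	const unsigned char sta[] = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x34 };
	struct iter_data d = { .nstations = 1 };

	memset(d.mask, 0xff, ETH_ALEN);
	mask_add_addr(&d, hw, sta);

	printf("mask %02x:...:%02x, opmode %s\n",
	       d.mask[0], d.mask[5], pick_opmode(&d));	/* last octet 0xf8 */
	return 0;
}
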
+ */ + ath9k_set_beaconing_status(sc, false); + error = ath_beacon_alloc(sc, vif); + if (!error) + ath_beacon_config(sc, vif); + ath9k_set_beaconing_status(sc, true); + } } -static void ath9k_reclaim_beacon(struct ath_softc *sc, - struct ieee80211_vif *vif) + +static int ath9k_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; + int ret = 0; - /* Disable SWBA interrupt */ - sc->sc_ah->imask &= ~ATH9K_INT_SWBA; - ath9k_ps_wakeup(sc); - ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); - ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); - tasklet_kill(&sc->bcon_tasklet); - ath9k_ps_restore(sc); + mutex_lock(&sc->mutex); - ath_beacon_return(sc, avp); - sc->sc_flags &= ~SC_OP_BEACONS; + switch (vif->type) { + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: + break; + default: + ath_err(common, "Interface type %d not yet supported\n", + vif->type); + ret = -EOPNOTSUPP; + goto out; + } - if (sc->nbcnvifs > 0) { - /* Re-enable beaconing */ - sc->sc_ah->imask |= ATH9K_INT_SWBA; - ath9k_ps_wakeup(sc); - ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); - ath9k_ps_restore(sc); + if (ath9k_uses_beacons(vif->type)) { + if (sc->nbcnvifs >= ATH_BCBUF) { + ath_err(common, "Not enough beacon buffers when adding" + " new interface of type: %i\n", + vif->type); + ret = -ENOBUFS; + goto out; + } } + + if ((vif->type == NL80211_IFTYPE_ADHOC) && + sc->nvifs > 0) { + ath_err(common, "Cannot create ADHOC interface when other" + " interfaces already exist.\n"); + ret = -EINVAL; + goto out; + } + + ath_dbg(common, ATH_DBG_CONFIG, + "Attach a VIF of type: %d\n", vif->type); + + /* Set the VIF opmode */ + avp->av_opmode = vif->type; + avp->av_bslot = -1; + + sc->nvifs++; + + ath9k_do_vif_add_setup(hw, vif); +out: + mutex_unlock(&sc->mutex); + return ret; } static int ath9k_change_interface(struct ieee80211_hw *hw, @@ -1457,40 +1507,40 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, enum nl80211_iftype new_type, bool p2p) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); int ret = 0; ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n"); mutex_lock(&sc->mutex); - switch (new_type) { - case NL80211_IFTYPE_AP: - case NL80211_IFTYPE_ADHOC: + /* See if new interface type is valid. 
*/ + if ((new_type == NL80211_IFTYPE_ADHOC) && + (sc->nvifs > 1)) { + ath_err(common, "When using ADHOC, it must be the only" + " interface.\n"); + ret = -EINVAL; + goto out; + } + + if (ath9k_uses_beacons(new_type) && + !ath9k_uses_beacons(vif->type)) { if (sc->nbcnvifs >= ATH_BCBUF) { ath_err(common, "No beacon slot available\n"); ret = -ENOBUFS; goto out; } - break; - case NL80211_IFTYPE_STATION: - /* Stop ANI */ - sc->sc_flags &= ~SC_OP_ANI_RUN; - del_timer_sync(&common->ani.timer); - if ((vif->type == NL80211_IFTYPE_AP) || - (vif->type == NL80211_IFTYPE_ADHOC)) - ath9k_reclaim_beacon(sc, vif); - break; - default: - ath_err(common, "Interface type %d not yet supported\n", - vif->type); - ret = -ENOTSUPP; - goto out; } + + /* Clean up old vif stuff */ + if (ath9k_uses_beacons(vif->type)) + ath9k_reclaim_beacon(sc, vif); + + /* Add new settings */ vif->type = new_type; vif->p2p = p2p; + ath9k_do_vif_add_setup(hw, vif); out: mutex_unlock(&sc->mutex); return ret; @@ -1499,25 +1549,20 @@ out: static void ath9k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n"); mutex_lock(&sc->mutex); - /* Stop ANI */ - sc->sc_flags &= ~SC_OP_ANI_RUN; - del_timer_sync(&common->ani.timer); + sc->nvifs--; /* Reclaim beacon resources */ - if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || - (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || - (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) + if (ath9k_uses_beacons(vif->type)) ath9k_reclaim_beacon(sc, vif); - sc->nvifs--; + ath9k_calculate_summary_state(hw, NULL); mutex_unlock(&sc->mutex); } @@ -1558,12 +1603,11 @@ static void ath9k_disable_ps(struct ath_softc *sc) static int ath9k_config(struct ieee80211_hw *hw, u32 changed) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &hw->conf; - bool disable_radio; + bool disable_radio = false; mutex_lock(&sc->mutex); @@ -1574,29 +1618,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) * the end. */ if (changed & IEEE80211_CONF_CHANGE_IDLE) { - bool enable_radio; - bool all_wiphys_idle; - bool idle = !!(conf->flags & IEEE80211_CONF_IDLE); - - spin_lock_bh(&sc->wiphy_lock); - all_wiphys_idle = ath9k_all_wiphys_idle(sc); - ath9k_set_wiphy_idle(aphy, idle); - - enable_radio = (!idle && all_wiphys_idle); - - /* - * After we unlock here its possible another wiphy - * can be re-renabled so to account for that we will - * only disable the radio toward the end of this routine - * if by then all wiphys are still idle. 
- */ - spin_unlock_bh(&sc->wiphy_lock); - - if (enable_radio) { - sc->ps_idle = false; + sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); + if (!sc->ps_idle) { ath_radio_enable(sc, hw); ath_dbg(common, ATH_DBG_CONFIG, "not-idle: enabling radio\n"); + } else { + disable_radio = true; } } @@ -1637,29 +1665,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if (ah->curchan) old_pos = ah->curchan - &ah->channels[0]; - aphy->chan_idx = pos; - aphy->chan_is_ht = conf_is_ht(conf); if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) sc->sc_flags |= SC_OP_OFFCHANNEL; else sc->sc_flags &= ~SC_OP_OFFCHANNEL; - if (aphy->state == ATH_WIPHY_SCAN || - aphy->state == ATH_WIPHY_ACTIVE) - ath9k_wiphy_pause_all_forced(sc, aphy); - else { - /* - * Do not change operational channel based on a paused - * wiphy changes. - */ - goto skip_chan_change; - } - - ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", - curchan->center_freq); + ath_dbg(common, ATH_DBG_CONFIG, + "Set channel: %d MHz type: %d\n", + curchan->center_freq, conf->channel_type); - /* XXX: remove me eventualy */ - ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]); + ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos], + curchan, conf->channel_type); /* update survey stats for the old channel before switching */ spin_lock_irqsave(&common->cc_lock, flags); @@ -1701,21 +1717,18 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) ath_update_survey_nf(sc, old_pos); } -skip_chan_change: if (changed & IEEE80211_CONF_CHANGE_POWER) { + ath_dbg(common, ATH_DBG_CONFIG, + "Set power: %d\n", conf->power_level); sc->config.txpowlimit = 2 * conf->power_level; ath9k_ps_wakeup(sc); - ath_update_txpow(sc); + ath9k_cmn_update_txpow(ah, sc->curtxpow, + sc->config.txpowlimit, &sc->curtxpow); ath9k_ps_restore(sc); } - spin_lock_bh(&sc->wiphy_lock); - disable_radio = ath9k_all_wiphys_idle(sc); - spin_unlock_bh(&sc->wiphy_lock); - if (disable_radio) { ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); - sc->ps_idle = true; ath_radio_disable(sc, hw); } @@ -1740,8 +1753,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw, unsigned int *total_flags, u64 multicast) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; u32 rfilt; changed_flags &= SUPPORTED_FILTERS; @@ -1761,8 +1773,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; ath_node_attach(sc, sta); @@ -1773,8 +1784,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; ath_node_detach(sc, sta); @@ -1784,8 +1794,7 @@ static int ath9k_sta_remove(struct ieee80211_hw *hw, static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue, const struct ieee80211_tx_queue_params *params) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_txq *txq; struct ath9k_tx_queue_info qi; @@ -1829,8 +1838,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_common *common = 
ath9k_hw_common(sc->sc_ah); int ret = 0; @@ -1874,8 +1882,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_bss_conf *bss_conf, u32 changed) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; + struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath_vif *avp = (void *)vif->drv_priv; @@ -1904,10 +1912,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, /* Enable transmission of beacons (AP, IBSS, MESH) */ if ((changed & BSS_CHANGED_BEACON) || ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) { - ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); - error = ath_beacon_alloc(aphy, vif); + ath9k_set_beaconing_status(sc, false); + error = ath_beacon_alloc(sc, vif); if (!error) ath_beacon_config(sc, vif); + ath9k_set_beaconing_status(sc, true); } if (changed & BSS_CHANGED_ERP_SLOT) { @@ -1930,21 +1939,26 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, } /* Disable transmission of beacons */ - if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon) - ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); + if ((changed & BSS_CHANGED_BEACON_ENABLED) && + !bss_conf->enable_beacon) { + ath9k_set_beaconing_status(sc, false); + avp->is_bslot_active = false; + ath9k_set_beaconing_status(sc, true); + } if (changed & BSS_CHANGED_BEACON_INT) { - sc->beacon_interval = bss_conf->beacon_int; + cur_conf->beacon_interval = bss_conf->beacon_int; /* * In case of AP mode, the HW TSF has to be reset * when the beacon interval changes. */ if (vif->type == NL80211_IFTYPE_AP) { sc->sc_flags |= SC_OP_TSF_RESET; - ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); - error = ath_beacon_alloc(aphy, vif); + ath9k_set_beaconing_status(sc, false); + error = ath_beacon_alloc(sc, vif); if (!error) ath_beacon_config(sc, vif); + ath9k_set_beaconing_status(sc, true); } else { ath_beacon_config(sc, vif); } @@ -1980,9 +1994,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, static u64 ath9k_get_tsf(struct ieee80211_hw *hw) { + struct ath_softc *sc = hw->priv; u64 tsf; - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); @@ -1995,8 +2008,7 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw) static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); @@ -2007,8 +2019,7 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf) static void ath9k_reset_tsf(struct ieee80211_hw *hw) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; mutex_lock(&sc->mutex); @@ -2023,10 +2034,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, - u16 tid, u16 *ssn) + u16 tid, u16 *ssn, u8 buf_size) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; int ret = 0; local_bh_disable(); @@ -2071,8 +2081,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_common 
*common = ath9k_hw_common(sc->sc_ah); struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; @@ -2106,53 +2115,55 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, return 0; } -static void ath9k_sw_scan_start(struct ieee80211_hw *hw) +static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; mutex_lock(&sc->mutex); - if (ath9k_wiphy_scanning(sc)) { - /* - * There is a race here in mac80211 but fixing it requires - * we revisit how we handle the scan complete callback. - * After mac80211 fixes we will not have configured hardware - * to the home channel nor would we have configured the RX - * filter yet. - */ - mutex_unlock(&sc->mutex); - return; - } - - aphy->state = ATH_WIPHY_SCAN; - ath9k_wiphy_pause_all_forced(sc, aphy); + ah->coverage_class = coverage_class; + ath9k_hw_init_global_settings(ah); mutex_unlock(&sc->mutex); } -/* - * XXX: this requires a revisit after the driver - * scan_complete gets moved to another place/removed in mac80211. - */ -static void ath9k_sw_scan_complete(struct ieee80211_hw *hw) +static void ath9k_flush(struct ieee80211_hw *hw, bool drop) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; + int timeout = 200; /* ms */ + int i, j; + ath9k_ps_wakeup(sc); mutex_lock(&sc->mutex); - aphy->state = ATH_WIPHY_ACTIVE; - mutex_unlock(&sc->mutex); -} -static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) -{ - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; - struct ath_hw *ah = sc->sc_ah; + cancel_delayed_work_sync(&sc->tx_complete_work); - mutex_lock(&sc->mutex); - ah->coverage_class = coverage_class; - ath9k_hw_init_global_settings(ah); + if (drop) + timeout = 1; + + for (j = 0; j < timeout; j++) { + int npend = 0; + + if (j) + usleep_range(1000, 2000); + + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (!ATH_TXQ_SETUP(sc, i)) + continue; + + npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]); + } + + if (!npend) + goto out; + } + + if (!ath_drain_all_txq(sc, false)) + ath_reset(sc, false); + +out: + ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); mutex_unlock(&sc->mutex); + ath9k_ps_restore(sc); } struct ieee80211_ops ath9k_ops = { @@ -2174,8 +2185,7 @@ struct ieee80211_ops ath9k_ops = { .reset_tsf = ath9k_reset_tsf, .ampdu_action = ath9k_ampdu_action, .get_survey = ath9k_get_survey, - .sw_scan_start = ath9k_sw_scan_start, - .sw_scan_complete = ath9k_sw_scan_complete, .rfkill_poll = ath9k_rfkill_poll_state, .set_coverage_class = ath9k_set_coverage_class, + .flush = ath9k_flush, }; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 78ef1f13386f..e83128c50f7b 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -126,7 +126,6 @@ static const struct ath_bus_ops ath_pci_bus_ops = { static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { void __iomem *mem; - struct ath_wiphy *aphy; struct ath_softc *sc; struct ieee80211_hw *hw; u8 csz; @@ -198,8 +197,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_iomap; } - hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) + - sizeof(struct ath_softc), &ath9k_ops); + hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops); if (!hw) { dev_err(&pdev->dev, "No memory 
for ieee80211_hw\n"); ret = -ENOMEM; @@ -209,11 +207,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) SET_IEEE80211_DEV(hw, &pdev->dev); pci_set_drvdata(pdev, hw); - aphy = hw->priv; - sc = (struct ath_softc *) (aphy + 1); - aphy->sc = sc; - aphy->hw = hw; - sc->pri_wiphy = aphy; + sc = hw->priv; sc->hw = hw; sc->dev = &pdev->dev; sc->mem = mem; @@ -260,8 +254,7 @@ err_dma: static void ath_pci_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; void __iomem *mem = sc->mem; if (!is_ath9k_unloaded) @@ -281,8 +274,7 @@ static int ath_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ieee80211_hw *hw = pci_get_drvdata(pdev); - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); @@ -293,8 +285,7 @@ static int ath_pci_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct ieee80211_hw *hw = pci_get_drvdata(pdev); - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; u32 val; /* @@ -320,7 +311,6 @@ static int ath_pci_resume(struct device *device) ath9k_ps_restore(sc); sc->ps_idle = true; - ath9k_set_wiphy_idle(aphy, true); ath_radio_disable(sc, hw); return 0; diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index e45147820eae..960d717ca7c2 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1560,8 +1560,7 @@ static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta, static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { - struct ath_wiphy *aphy = hw->priv; - return aphy->sc; + return hw->priv; } static void ath_rate_free(void *priv) diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index b2497b8601e5..a9c3f4672aa0 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -34,27 +34,6 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc) (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP); } -static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, - struct ieee80211_hdr *hdr) -{ - struct ieee80211_hw *hw = sc->pri_wiphy->hw; - int i; - - spin_lock_bh(&sc->wiphy_lock); - for (i = 0; i < sc->num_sec_wiphy; i++) { - struct ath_wiphy *aphy = sc->sec_wiphy[i]; - if (aphy == NULL) - continue; - if (compare_ether_addr(hdr->addr1, aphy->hw->wiphy->perm_addr) - == 0) { - hw = aphy->hw; - break; - } - } - spin_unlock_bh(&sc->wiphy_lock); - return hw; -} - /* * Setup and link descriptors. 
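With the virtual-wiphy layer gone, ath_pci_probe() above allocates the ath_softc directly as the ieee80211_hw private area instead of going through an intermediate ath_wiphy. A toy illustration of that embedding pattern; toy_hw and toy_softc are stand-ins, not mac80211 types:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for ieee80211_hw: fixed header plus driver-private area. */
struct toy_hw {
	int wiphy_id;
	unsigned char priv[];	/* driver context lives right behind the header */
};

struct toy_softc {
	struct toy_hw *hw;
	int nvifs;
};

static struct toy_hw *toy_alloc_hw(size_t priv_len)
{
	return calloc(1, sizeof(struct toy_hw) + priv_len);
}

int main(void)
{
	struct toy_hw *hw = toy_alloc_hw(sizeof(struct toy_softc));
	struct toy_softc *sc;

	if (!hw)
		return 1;

	sc = (struct toy_softc *)hw->priv;	/* sc = hw->priv in the driver */
	sc->hw = hw;
	printf("softc embedded at %p inside hw at %p\n", (void *)sc, (void *)hw);
	free(hw);
	return 0;
}
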
* @@ -230,11 +209,6 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs) int error = 0, i; u32 size; - - common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN + - ah->caps.rx_status_len, - min(common->cachelsz, (u16)64)); - ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize - ah->caps.rx_status_len); @@ -321,12 +295,12 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) sc->sc_flags &= ~SC_OP_RXFLUSH; spin_lock_init(&sc->rx.rxbuflock); + common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 + + sc->sc_ah->caps.rx_status_len; + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { return ath_rx_edma_init(sc, nbufs); } else { - common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN, - min(common->cachelsz, (u16)64)); - ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", common->cachelsz, common->rx_bufsize); @@ -439,9 +413,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) * mode interface or when in monitor mode. AP mode does not need this * since it receives all in-BSS frames anyway. */ - if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && - (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || - (sc->sc_ah->is_monitoring)) + if (sc->sc_ah->is_monitoring) rfilt |= ATH9K_RX_FILTER_PROM; if (sc->rx.rxfilter & FIF_CONTROL) @@ -463,8 +435,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) if (conf_is_ht(&sc->hw->conf)) rfilt |= ATH9K_RX_FILTER_COMP_BAR; - if (sc->sec_wiphy || (sc->nvifs > 1) || - (sc->rx.rxfilter & FIF_OTHER_BSS)) { + if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) { /* The following may also be needed for other older chips */ if (sc->sc_ah->hw_version.macVersion == AR_SREV_VERSION_9160) rfilt |= ATH9K_RX_FILTER_PROM; @@ -588,8 +559,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) return; mgmt = (struct ieee80211_mgmt *)skb->data; - if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) + if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0) { + /* TODO: This doesn't work well if you have stations + * associated to two different APs because curbssid + * is just the last AP that any of the stations associated + * with. + */ return; /* not from our current AP */ + } sc->ps_flags &= ~PS_WAIT_FOR_BEACON; @@ -662,37 +639,6 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) } } -static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw, - struct ath_softc *sc, struct sk_buff *skb) -{ - struct ieee80211_hdr *hdr; - - hdr = (struct ieee80211_hdr *)skb->data; - - /* Send the frame to mac80211 */ - if (is_multicast_ether_addr(hdr->addr1)) { - int i; - /* - * Deliver broadcast/multicast frames to all suitable - * virtual wiphys. - */ - /* TODO: filter based on channel configuration */ - for (i = 0; i < sc->num_sec_wiphy; i++) { - struct ath_wiphy *aphy = sc->sec_wiphy[i]; - struct sk_buff *nskb; - if (aphy == NULL) - continue; - nskb = skb_copy(skb, GFP_ATOMIC); - if (!nskb) - continue; - ieee80211_rx(aphy->hw, nskb); - } - ieee80211_rx(sc->hw, skb); - } else - /* Deliver unicast frames based on receiver address */ - ieee80211_rx(hw, skb); -} - static bool ath_edma_get_buffers(struct ath_softc *sc, enum ath9k_rx_qtype qtype) { @@ -862,15 +808,9 @@ static bool ath9k_rx_accept(struct ath_common *common, if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) return false; - /* - * rs_more indicates chained descriptors which can be used - * to link buffers together for a sort of scatter-gather - * operation. 
- * reject the frame, we don't support scatter-gather yet and - * the frame is probably corrupt anyway - */ + /* Only use error bits from the last fragment */ if (rx_stats->rs_more) - return false; + return true; /* * The rx_stats->rs_status will not be set until the end of the @@ -974,7 +914,7 @@ static void ath9k_process_rssi(struct ath_common *common, struct ieee80211_hdr *hdr, struct ath_rx_status *rx_stats) { - struct ath_wiphy *aphy = hw->priv; + struct ath_softc *sc = hw->priv; struct ath_hw *ah = common->ah; int last_rssi; __le16 fc; @@ -984,13 +924,19 @@ static void ath9k_process_rssi(struct ath_common *common, fc = hdr->frame_control; if (!ieee80211_is_beacon(fc) || - compare_ether_addr(hdr->addr3, common->curbssid)) + compare_ether_addr(hdr->addr3, common->curbssid)) { + /* TODO: This doesn't work well if you have stations + * associated to two different APs because curbssid + * is just the last AP that any of the stations associated + * with. + */ return; + } if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) - ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi); + ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi); - last_rssi = aphy->last_rssi; + last_rssi = sc->last_rssi; if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) rx_stats->rs_rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); @@ -1022,6 +968,10 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common, if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) return -EINVAL; + /* Only use status info from the last fragment */ + if (rx_stats->rs_more) + return 0; + ath9k_process_rssi(common, hw, hdr, rx_stats); if (ath9k_process_rate(common, hw, rx_stats, rx_status)) @@ -1031,7 +981,7 @@ static int ath9k_rx_skb_preprocess(struct ath_common *common, rx_status->freq = hw->conf.channel->center_freq; rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; rx_status->antenna = rx_stats->rs_antenna; - rx_status->flag |= RX_FLAG_TSFT; + rx_status->flag |= RX_FLAG_MACTIME_MPDU; return 0; } @@ -1623,7 +1573,7 @@ div_comb_done: int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) { struct ath_buf *bf; - struct sk_buff *skb = NULL, *requeue_skb; + struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; struct ieee80211_rx_status *rxs; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); @@ -1632,7 +1582,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) * virtual wiphy so to account for that we iterate over the active * wiphys and find the appropriate wiphy and therefore hw. */ - struct ieee80211_hw *hw = NULL; + struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; int retval; bool decrypt_error = false; @@ -1674,10 +1624,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (!skb) continue; - hdr = (struct ieee80211_hdr *) (skb->data + rx_status_len); - rxs = IEEE80211_SKB_RXCB(skb); + /* + * Take frame header from the first fragment and RX status from + * the last one. + */ + if (sc->rx.frag) + hdr_skb = sc->rx.frag; + else + hdr_skb = skb; - hw = ath_get_virt_hw(sc, hdr); + hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); + rxs = IEEE80211_SKB_RXCB(hdr_skb); ath_debug_stat_rx(sc, &rs); @@ -1686,12 +1643,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) * chain it back at the queue without processing it. 
*/ if (flush) - goto requeue; + goto requeue_drop_frag; retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs, rxs, &decrypt_error); if (retval) - goto requeue; + goto requeue_drop_frag; rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp; if (rs.rs_tstamp > tsf_lower && @@ -1711,7 +1668,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) * skb and put it at the tail of the sc->rx.rxbuf list for * processing. */ if (!requeue_skb) - goto requeue; + goto requeue_drop_frag; /* Unmap the frame */ dma_unmap_single(sc->dev, bf->bf_buf_addr, @@ -1722,8 +1679,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (ah->caps.rx_status_len) skb_pull(skb, ah->caps.rx_status_len); - ath9k_rx_skb_postprocess(common, skb, &rs, - rxs, decrypt_error); + if (!rs.rs_more) + ath9k_rx_skb_postprocess(common, hdr_skb, &rs, + rxs, decrypt_error); /* We will now give hardware our shiny new allocated skb */ bf->bf_mpdu = requeue_skb; @@ -1736,10 +1694,42 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) bf->bf_mpdu = NULL; bf->bf_buf_addr = 0; ath_err(common, "dma_mapping_error() on RX\n"); - ath_rx_send_to_mac80211(hw, sc, skb); + ieee80211_rx(hw, skb); break; } + if (rs.rs_more) { + /* + * rs_more indicates chained descriptors which can be + * used to link buffers together for a sort of + * scatter-gather operation. + */ + if (sc->rx.frag) { + /* too many fragments - cannot handle frame */ + dev_kfree_skb_any(sc->rx.frag); + dev_kfree_skb_any(skb); + skb = NULL; + } + sc->rx.frag = skb; + goto requeue; + } + + if (sc->rx.frag) { + int space = skb->len - skb_tailroom(hdr_skb); + + sc->rx.frag = NULL; + + if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { + dev_kfree_skb(skb); + goto requeue_drop_frag; + } + + skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len), + skb->len); + dev_kfree_skb_any(skb); + skb = hdr_skb; + } + /* * change the default rx antenna if rx diversity chooses the * other antenna 3 times in a row. 
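The rs_more handling above stitches a chained RX frame back together: the 802.11 header and rx status are taken from the first buffer, later fragments are appended to it, and anything left over is dropped on error. The same idea in plain C, with heap buffers standing in for skbs; sizes and names are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A crude stand-in for an skb: a growable byte buffer. */
struct buf {
	unsigned char *data;
	size_t len;
};

/*
 * Append "frag" to "head", as the rs_more path does with
 * pskb_expand_head() + skb_copy_from_linear_data().  Returns 0 on
 * success; on failure the caller drops both buffers, mirroring the
 * requeue_drop_frag label in ath_rx_tasklet().
 */
static int frag_append(struct buf *head, const struct buf *frag)
{
	unsigned char *grown = realloc(head->data, head->len + frag->len);

	if (!grown)
		return -1;

	memcpy(grown + head->len, frag->data, frag->len);
	head->data = grown;
	head->len += frag->len;
	return 0;
}

int main(void)
{
	struct buf head = { .len = 5 };
	struct buf frag = { .len = 6 };

	head.data = malloc(head.len);
	frag.data = malloc(frag.len);
	memcpy(head.data, "HDR+A", 5);
	memcpy(frag.data, "PAYLOA", 6);

	if (!frag_append(&head, &frag))
		printf("reassembled %zu bytes: %.*s\n",
		       head.len, (int)head.len, (char *)head.data);

	free(frag.data);
	free(head.data);
	return 0;
}
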
@@ -1763,8 +1753,13 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) ath_ant_comb_scan(sc, &rs); - ath_rx_send_to_mac80211(hw, sc, skb); + ieee80211_rx(hw, skb); +requeue_drop_frag: + if (sc->rx.frag) { + dev_kfree_skb_any(sc->rx.frag); + sc->rx.frag = NULL; + } requeue: if (edma) { list_add_tail(&bf->list, &sc->rx.rxbuf); diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 4df5659c6c16..8fa8acfde62e 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -789,6 +789,7 @@ #define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */ #define AR_SREV_VERSION_9485 0x240 #define AR_SREV_REVISION_9485_10 0 +#define AR_SREV_REVISION_9485_11 1 #define AR_SREV_5416(_ah) \ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ @@ -866,6 +867,9 @@ #define AR_SREV_9485_10(_ah) \ (AR_SREV_9485(_ah) && \ ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10)) +#define AR_SREV_9485_11(_ah) \ + (AR_SREV_9485(_ah) && \ + ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11)) #define AR_SREV_9285E_20(_ah) \ (AR_SREV_9285_12_OR_LATER(_ah) && \ @@ -874,6 +878,7 @@ enum ath_usb_dev { AR9280_USB = 1, /* AR7010 + AR9280, UB94 */ AR9287_USB = 2, /* AR7010 + AR9287, UB95 */ + STORAGE_DEVICE = 3, }; #define AR_DEVID_7010(_ah) \ @@ -1083,6 +1088,17 @@ enum { #define AR_ENT_OTP 0x40d8 #define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000 #define AR_ENT_OTP_MPSD 0x00800000 +#define AR_CH0_BB_DPLL2 0x16184 +#define AR_CH0_BB_DPLL3 0x16188 +#define AR_CH0_DDR_DPLL2 0x16244 +#define AR_CH0_DDR_DPLL3 0x16248 +#define AR_CH0_DPLL2_KD 0x03F80000 +#define AR_CH0_DPLL2_KD_S 19 +#define AR_CH0_DPLL2_KI 0x3C000000 +#define AR_CH0_DPLL2_KI_S 26 +#define AR_CH0_DPLL3_PHASE_SHIFT 0x3F800000 +#define AR_CH0_DPLL3_PHASE_SHIFT_S 23 +#define AR_PHY_CCA_NOM_VAL_2GHZ -118 #define AR_RTC_9300_PLL_DIV 0x000003ff #define AR_RTC_9300_PLL_DIV_S 0 @@ -1129,6 +1145,12 @@ enum { #define AR_RTC_PLL_CLKSEL 0x00000300 #define AR_RTC_PLL_CLKSEL_S 8 +#define PLL3 0x16188 +#define PLL3_DO_MEAS_MASK 0x40000000 +#define PLL4 0x1618c +#define PLL4_MEAS_DONE 0x8 +#define SQSUM_DVC_MASK 0x007ffff8 + #define AR_RTC_RESET \ ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040) #define AR_RTC_RESET_EN (0x00000001) diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c deleted file mode 100644 index 2dc7095e56d1..000000000000 --- a/drivers/net/wireless/ath/ath9k/virtual.c +++ /dev/null @@ -1,717 +0,0 @@ -/* - * Copyright (c) 2008-2009 Atheros Communications Inc. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include <linux/slab.h> - -#include "ath9k.h" - -struct ath9k_vif_iter_data { - const u8 *hw_macaddr; - u8 mask[ETH_ALEN]; -}; - -static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) -{ - struct ath9k_vif_iter_data *iter_data = data; - int i; - - for (i = 0; i < ETH_ALEN; i++) - iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); -} - -void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath9k_vif_iter_data iter_data; - int i; - - /* - * Use the hardware MAC address as reference, the hardware uses it - * together with the BSSID mask when matching addresses. - */ - iter_data.hw_macaddr = common->macaddr; - memset(&iter_data.mask, 0xff, ETH_ALEN); - - if (vif) - ath9k_vif_iter(&iter_data, vif->addr, vif); - - /* Get list of all active MAC addresses */ - spin_lock_bh(&sc->wiphy_lock); - ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter, - &iter_data); - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] == NULL) - continue; - ieee80211_iterate_active_interfaces_atomic( - sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data); - } - spin_unlock_bh(&sc->wiphy_lock); - - memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); - ath_hw_setbssidmask(common); -} - -int ath9k_wiphy_add(struct ath_softc *sc) -{ - int i, error; - struct ath_wiphy *aphy; - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ieee80211_hw *hw; - u8 addr[ETH_ALEN]; - - hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops); - if (hw == NULL) - return -ENOMEM; - - spin_lock_bh(&sc->wiphy_lock); - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] == NULL) - break; - } - - if (i == sc->num_sec_wiphy) { - /* No empty slot available; increase array length */ - struct ath_wiphy **n; - n = krealloc(sc->sec_wiphy, - (sc->num_sec_wiphy + 1) * - sizeof(struct ath_wiphy *), - GFP_ATOMIC); - if (n == NULL) { - spin_unlock_bh(&sc->wiphy_lock); - ieee80211_free_hw(hw); - return -ENOMEM; - } - n[i] = NULL; - sc->sec_wiphy = n; - sc->num_sec_wiphy++; - } - - SET_IEEE80211_DEV(hw, sc->dev); - - aphy = hw->priv; - aphy->sc = sc; - aphy->hw = hw; - sc->sec_wiphy[i] = aphy; - aphy->last_rssi = ATH_RSSI_DUMMY_MARKER; - spin_unlock_bh(&sc->wiphy_lock); - - memcpy(addr, common->macaddr, ETH_ALEN); - addr[0] |= 0x02; /* Locally managed address */ - /* - * XOR virtual wiphy index into the least significant bits to generate - * a different MAC address for each virtual wiphy. 
- */ - addr[5] ^= i & 0xff; - addr[4] ^= (i & 0xff00) >> 8; - addr[3] ^= (i & 0xff0000) >> 16; - - SET_IEEE80211_PERM_ADDR(hw, addr); - - ath9k_set_hw_capab(sc, hw); - - error = ieee80211_register_hw(hw); - - if (error == 0) { - /* Make sure wiphy scheduler is started (if enabled) */ - ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int); - } - - return error; -} - -int ath9k_wiphy_del(struct ath_wiphy *aphy) -{ - struct ath_softc *sc = aphy->sc; - int i; - - spin_lock_bh(&sc->wiphy_lock); - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (aphy == sc->sec_wiphy[i]) { - sc->sec_wiphy[i] = NULL; - spin_unlock_bh(&sc->wiphy_lock); - ieee80211_unregister_hw(aphy->hw); - ieee80211_free_hw(aphy->hw); - return 0; - } - } - spin_unlock_bh(&sc->wiphy_lock); - return -ENOENT; -} - -static int ath9k_send_nullfunc(struct ath_wiphy *aphy, - struct ieee80211_vif *vif, const u8 *bssid, - int ps) -{ - struct ath_softc *sc = aphy->sc; - struct ath_tx_control txctl; - struct sk_buff *skb; - struct ieee80211_hdr *hdr; - __le16 fc; - struct ieee80211_tx_info *info; - - skb = dev_alloc_skb(24); - if (skb == NULL) - return -ENOMEM; - hdr = (struct ieee80211_hdr *) skb_put(skb, 24); - memset(hdr, 0, 24); - fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | - IEEE80211_FCTL_TODS); - if (ps) - fc |= cpu_to_le16(IEEE80211_FCTL_PM); - hdr->frame_control = fc; - memcpy(hdr->addr1, bssid, ETH_ALEN); - memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN); - memcpy(hdr->addr3, bssid, ETH_ALEN); - - info = IEEE80211_SKB_CB(skb); - memset(info, 0, sizeof(*info)); - info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS; - info->control.vif = vif; - info->control.rates[0].idx = 0; - info->control.rates[0].count = 4; - info->control.rates[1].idx = -1; - - memset(&txctl, 0, sizeof(struct ath_tx_control)); - txctl.txq = sc->tx.txq_map[WME_AC_VO]; - txctl.frame_type = ps ? 
ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE; - - if (ath_tx_start(aphy->hw, skb, &txctl) != 0) - goto exit; - - return 0; -exit: - dev_kfree_skb_any(skb); - return -1; -} - -static bool __ath9k_wiphy_pausing(struct ath_softc *sc) -{ - int i; - if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING) - return true; - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] && - sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING) - return true; - } - return false; -} - -static bool ath9k_wiphy_pausing(struct ath_softc *sc) -{ - bool ret; - spin_lock_bh(&sc->wiphy_lock); - ret = __ath9k_wiphy_pausing(sc); - spin_unlock_bh(&sc->wiphy_lock); - return ret; -} - -static bool __ath9k_wiphy_scanning(struct ath_softc *sc) -{ - int i; - if (sc->pri_wiphy->state == ATH_WIPHY_SCAN) - return true; - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] && - sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN) - return true; - } - return false; -} - -bool ath9k_wiphy_scanning(struct ath_softc *sc) -{ - bool ret; - spin_lock_bh(&sc->wiphy_lock); - ret = __ath9k_wiphy_scanning(sc); - spin_unlock_bh(&sc->wiphy_lock); - return ret; -} - -static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy); - -/* caller must hold wiphy_lock */ -static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy) -{ - if (aphy == NULL) - return; - if (aphy->chan_idx != aphy->sc->chan_idx) - return; /* wiphy not on the selected channel */ - __ath9k_wiphy_unpause(aphy); -} - -static void ath9k_wiphy_unpause_channel(struct ath_softc *sc) -{ - int i; - spin_lock_bh(&sc->wiphy_lock); - __ath9k_wiphy_unpause_ch(sc->pri_wiphy); - for (i = 0; i < sc->num_sec_wiphy; i++) - __ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]); - spin_unlock_bh(&sc->wiphy_lock); -} - -void ath9k_wiphy_chan_work(struct work_struct *work) -{ - struct ath_softc *sc = container_of(work, struct ath_softc, chan_work); - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_wiphy *aphy = sc->next_wiphy; - - if (aphy == NULL) - return; - - /* - * All pending interfaces paused; ready to change - * channels. - */ - - /* Change channels */ - mutex_lock(&sc->mutex); - /* XXX: remove me eventually */ - ath9k_update_ichannel(sc, aphy->hw, - &sc->sc_ah->channels[sc->chan_idx]); - - /* sync hw configuration for hw code */ - common->hw = aphy->hw; - - if (ath_set_channel(sc, aphy->hw, - &sc->sc_ah->channels[sc->chan_idx]) < 0) { - printk(KERN_DEBUG "ath9k: Failed to set channel for new " - "virtual wiphy\n"); - mutex_unlock(&sc->mutex); - return; - } - mutex_unlock(&sc->mutex); - - ath9k_wiphy_unpause_channel(sc); -} - -/* - * ath9k version of ieee80211_tx_status() for TX frames that are generated - * internally in the driver. - */ -void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype) -{ - struct ath_wiphy *aphy = hw->priv; - struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - - if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) { - if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) { - printk(KERN_DEBUG "ath9k: %s: no ACK for pause " - "frame\n", wiphy_name(hw->wiphy)); - /* - * The AP did not reply; ignore this to allow us to - * continue. - */ - } - aphy->state = ATH_WIPHY_PAUSED; - if (!ath9k_wiphy_pausing(aphy->sc)) { - /* - * Drop from tasklet to work to allow mutex for channel - * change. 
- */ - ieee80211_queue_work(aphy->sc->hw, - &aphy->sc->chan_work); - } - } - - dev_kfree_skb(skb); -} - -static void ath9k_mark_paused(struct ath_wiphy *aphy) -{ - struct ath_softc *sc = aphy->sc; - aphy->state = ATH_WIPHY_PAUSED; - if (!__ath9k_wiphy_pausing(sc)) - ieee80211_queue_work(sc->hw, &sc->chan_work); -} - -static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif) -{ - struct ath_wiphy *aphy = data; - struct ath_vif *avp = (void *) vif->drv_priv; - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - if (!vif->bss_conf.assoc) { - ath9k_mark_paused(aphy); - break; - } - /* TODO: could avoid this if already in PS mode */ - if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) { - printk(KERN_DEBUG "%s: failed to send PS nullfunc\n", - __func__); - ath9k_mark_paused(aphy); - } - break; - case NL80211_IFTYPE_AP: - /* Beacon transmission is paused by aphy->state change */ - ath9k_mark_paused(aphy); - break; - default: - break; - } -} - -/* caller must hold wiphy_lock */ -static int __ath9k_wiphy_pause(struct ath_wiphy *aphy) -{ - ieee80211_stop_queues(aphy->hw); - aphy->state = ATH_WIPHY_PAUSING; - /* - * TODO: handle PAUSING->PAUSED for the case where there are multiple - * active vifs (now we do it on the first vif getting ready; should be - * on the last) - */ - ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter, - aphy); - return 0; -} - -int ath9k_wiphy_pause(struct ath_wiphy *aphy) -{ - int ret; - spin_lock_bh(&aphy->sc->wiphy_lock); - ret = __ath9k_wiphy_pause(aphy); - spin_unlock_bh(&aphy->sc->wiphy_lock); - return ret; -} - -static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif) -{ - struct ath_wiphy *aphy = data; - struct ath_vif *avp = (void *) vif->drv_priv; - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - if (!vif->bss_conf.assoc) - break; - ath9k_send_nullfunc(aphy, vif, avp->bssid, 0); - break; - case NL80211_IFTYPE_AP: - /* Beacon transmission is re-enabled by aphy->state change */ - break; - default: - break; - } -} - -/* caller must hold wiphy_lock */ -static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy) -{ - ieee80211_iterate_active_interfaces_atomic(aphy->hw, - ath9k_unpause_iter, aphy); - aphy->state = ATH_WIPHY_ACTIVE; - ieee80211_wake_queues(aphy->hw); - return 0; -} - -int ath9k_wiphy_unpause(struct ath_wiphy *aphy) -{ - int ret; - spin_lock_bh(&aphy->sc->wiphy_lock); - ret = __ath9k_wiphy_unpause(aphy); - spin_unlock_bh(&aphy->sc->wiphy_lock); - return ret; -} - -static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc) -{ - int i; - if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) - sc->pri_wiphy->state = ATH_WIPHY_PAUSED; - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] && - sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) - sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED; - } -} - -/* caller must hold wiphy_lock */ -static void __ath9k_wiphy_pause_all(struct ath_softc *sc) -{ - int i; - if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) - __ath9k_wiphy_pause(sc->pri_wiphy); - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] && - sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE) - __ath9k_wiphy_pause(sc->sec_wiphy[i]); - } -} - -int ath9k_wiphy_select(struct ath_wiphy *aphy) -{ - struct ath_softc *sc = aphy->sc; - bool now; - - spin_lock_bh(&sc->wiphy_lock); - if (__ath9k_wiphy_scanning(sc)) { - /* - * For now, we are using mac80211 sw scan and it expects to - * have full control over channel changes, so avoid wiphy - * scheduling during a scan. 
This could be optimized if the - * scanning control were moved into the driver. - */ - spin_unlock_bh(&sc->wiphy_lock); - return -EBUSY; - } - if (__ath9k_wiphy_pausing(sc)) { - if (sc->wiphy_select_failures == 0) - sc->wiphy_select_first_fail = jiffies; - sc->wiphy_select_failures++; - if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2)) - { - printk(KERN_DEBUG "ath9k: Previous wiphy select timed " - "out; disable/enable hw to recover\n"); - __ath9k_wiphy_mark_all_paused(sc); - /* - * TODO: this workaround to fix hardware is unlikely to - * be specific to virtual wiphy changes. It can happen - * on normal channel change, too, and as such, this - * should really be made more generic. For example, - * tricker radio disable/enable on GTT interrupt burst - * (say, 10 GTT interrupts received without any TX - * frame being completed) - */ - spin_unlock_bh(&sc->wiphy_lock); - ath_radio_disable(sc, aphy->hw); - ath_radio_enable(sc, aphy->hw); - /* Only the primary wiphy hw is used for queuing work */ - ieee80211_queue_work(aphy->sc->hw, - &aphy->sc->chan_work); - return -EBUSY; /* previous select still in progress */ - } - spin_unlock_bh(&sc->wiphy_lock); - return -EBUSY; /* previous select still in progress */ - } - sc->wiphy_select_failures = 0; - - /* Store the new channel */ - sc->chan_idx = aphy->chan_idx; - sc->chan_is_ht = aphy->chan_is_ht; - sc->next_wiphy = aphy; - - __ath9k_wiphy_pause_all(sc); - now = !__ath9k_wiphy_pausing(aphy->sc); - spin_unlock_bh(&sc->wiphy_lock); - - if (now) { - /* Ready to request channel change immediately */ - ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work); - } - - /* - * wiphys will be unpaused in ath9k_tx_status() once channel has been - * changed if any wiphy needs time to become paused. - */ - - return 0; -} - -bool ath9k_wiphy_started(struct ath_softc *sc) -{ - int i; - spin_lock_bh(&sc->wiphy_lock); - if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) { - spin_unlock_bh(&sc->wiphy_lock); - return true; - } - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] && - sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) { - spin_unlock_bh(&sc->wiphy_lock); - return true; - } - } - spin_unlock_bh(&sc->wiphy_lock); - return false; -} - -static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy, - struct ath_wiphy *selected) -{ - if (selected->state == ATH_WIPHY_SCAN) { - if (aphy == selected) - return; - /* - * Pause all other wiphys for the duration of the scan even if - * they are on the current channel now. 
- */ - } else if (aphy->chan_idx == selected->chan_idx) - return; - aphy->state = ATH_WIPHY_PAUSED; - ieee80211_stop_queues(aphy->hw); -} - -void ath9k_wiphy_pause_all_forced(struct ath_softc *sc, - struct ath_wiphy *selected) -{ - int i; - spin_lock_bh(&sc->wiphy_lock); - if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) - ath9k_wiphy_pause_chan(sc->pri_wiphy, selected); - for (i = 0; i < sc->num_sec_wiphy; i++) { - if (sc->sec_wiphy[i] && - sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE) - ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected); - } - spin_unlock_bh(&sc->wiphy_lock); -} - -void ath9k_wiphy_work(struct work_struct *work) -{ - struct ath_softc *sc = container_of(work, struct ath_softc, - wiphy_work.work); - struct ath_wiphy *aphy = NULL; - bool first = true; - - spin_lock_bh(&sc->wiphy_lock); - - if (sc->wiphy_scheduler_int == 0) { - /* wiphy scheduler is disabled */ - spin_unlock_bh(&sc->wiphy_lock); - return; - } - -try_again: - sc->wiphy_scheduler_index++; - while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) { - aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1]; - if (aphy && aphy->state != ATH_WIPHY_INACTIVE) - break; - - sc->wiphy_scheduler_index++; - aphy = NULL; - } - if (aphy == NULL) { - sc->wiphy_scheduler_index = 0; - if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) { - if (first) { - first = false; - goto try_again; - } - /* No wiphy is ready to be scheduled */ - } else - aphy = sc->pri_wiphy; - } - - spin_unlock_bh(&sc->wiphy_lock); - - if (aphy && - aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN && - ath9k_wiphy_select(aphy)) { - printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy " - "change\n"); - } - - ieee80211_queue_delayed_work(sc->hw, - &sc->wiphy_work, - sc->wiphy_scheduler_int); -} - -void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int) -{ - cancel_delayed_work_sync(&sc->wiphy_work); - sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int); - if (sc->wiphy_scheduler_int) - ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work, - sc->wiphy_scheduler_int); -} - -/* caller must hold wiphy_lock */ -bool ath9k_all_wiphys_idle(struct ath_softc *sc) -{ - unsigned int i; - if (!sc->pri_wiphy->idle) - return false; - for (i = 0; i < sc->num_sec_wiphy; i++) { - struct ath_wiphy *aphy = sc->sec_wiphy[i]; - if (!aphy) - continue; - if (!aphy->idle) - return false; - } - return true; -} - -/* caller must hold wiphy_lock */ -void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle) -{ - struct ath_softc *sc = aphy->sc; - - aphy->idle = idle; - ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, - "Marking %s as %sidle\n", - wiphy_name(aphy->hw->wiphy), idle ? 
"" : "not-"); -} -/* Only bother starting a queue on an active virtual wiphy */ -bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue) -{ - struct ieee80211_hw *hw = sc->pri_wiphy->hw; - unsigned int i; - bool txq_started = false; - - spin_lock_bh(&sc->wiphy_lock); - - /* Start the primary wiphy */ - if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) { - ieee80211_wake_queue(hw, skb_queue); - txq_started = true; - goto unlock; - } - - /* Now start the secondary wiphy queues */ - for (i = 0; i < sc->num_sec_wiphy; i++) { - struct ath_wiphy *aphy = sc->sec_wiphy[i]; - if (!aphy) - continue; - if (aphy->state != ATH_WIPHY_ACTIVE) - continue; - - hw = aphy->hw; - ieee80211_wake_queue(hw, skb_queue); - txq_started = true; - break; - } - -unlock: - spin_unlock_bh(&sc->wiphy_lock); - return txq_started; -} - -/* Go ahead and propagate information to all virtual wiphys, it won't hurt */ -void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue) -{ - struct ieee80211_hw *hw = sc->pri_wiphy->hw; - unsigned int i; - - spin_lock_bh(&sc->wiphy_lock); - - /* Stop the primary wiphy */ - ieee80211_stop_queue(hw, skb_queue); - - /* Now stop the secondary wiphy queues */ - for (i = 0; i < sc->num_sec_wiphy; i++) { - struct ath_wiphy *aphy = sc->sec_wiphy[i]; - if (!aphy) - continue; - hw = aphy->hw; - ieee80211_stop_queue(hw, skb_queue); - } - spin_unlock_bh(&sc->wiphy_lock); -} diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index dc862f5e1162..d3d24904f62f 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -123,12 +123,8 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv) void ath9k_swba_tasklet(unsigned long data) { struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; - struct ath_common *common = ath9k_hw_common(priv->ah); - - ath_dbg(common, ATH_DBG_WMI, "SWBA Event received\n"); ath9k_htc_swba(priv, priv->wmi->beacon_pending); - } void ath9k_fatal_work(struct work_struct *work) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 07b7804aec5b..ef22096d40c9 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -19,7 +19,6 @@ #define BITS_PER_BYTE 8 #define OFDM_PLCP_BITS 22 -#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f) #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) #define L_STF 8 #define L_LTF 8 @@ -32,7 +31,6 @@ #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) -#define OFDM_SIFS_TIME 16 static u16 bits_per_symbol[][2] = { /* 20MHz 40MHz */ @@ -57,8 +55,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, struct list_head *head); static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len); -static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, - int nframes, int nbad, int txok, bool update_rc); +static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, + struct ath_tx_status *ts, int nframes, int nbad, + int txok, bool update_rc); static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, int seqno); @@ -167,9 +166,9 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) fi = get_frame_info(bf->bf_mpdu); if (fi->retries) { ath_tx_update_baw(sc, tid, fi->seqno); - ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); + ath_tx_complete_buf(sc, bf, 
txq, &bf_head, &ts, 0, 1); } else { - ath_tx_send_normal(sc, txq, tid, &bf_head); + ath_tx_send_normal(sc, txq, NULL, &bf_head); } spin_lock_bh(&txq->axq_lock); } @@ -297,7 +296,6 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) ATH_TXBUF_RESET(tbf); - tbf->aphy = bf->aphy; tbf->bf_mpdu = bf->bf_mpdu; tbf->bf_buf_addr = bf->bf_buf_addr; memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len); @@ -345,7 +343,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, struct ath_node *an = NULL; struct sk_buff *skb; struct ieee80211_sta *sta; - struct ieee80211_hw *hw; + struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *tx_info; struct ath_atx_tid *tid = NULL; @@ -364,7 +362,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, hdr = (struct ieee80211_hdr *)skb->data; tx_info = IEEE80211_SKB_CB(skb); - hw = bf->aphy->hw; memcpy(rates, tx_info->control.rates, sizeof(rates)); @@ -383,7 +380,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, !bf->bf_stale || bf_next != NULL) list_move_tail(&bf->list, &bf_head); - ath_tx_rc_status(bf, ts, 1, 1, 0, false); + ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false); ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0, 0); @@ -429,7 +426,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); while (bf) { - txfail = txpending = 0; + txfail = txpending = sendbar = 0; bf_next = bf->bf_next; skb = bf->bf_mpdu; @@ -489,10 +486,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { memcpy(tx_info->control.rates, rates, sizeof(rates)); - ath_tx_rc_status(bf, ts, nframes, nbad, txok, true); + ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true); rc_update = false; } else { - ath_tx_rc_status(bf, ts, nframes, nbad, txok, false); + ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false); } ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, @@ -516,7 +513,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, bf->bf_state.bf_type |= BUF_XRETRY; - ath_tx_rc_status(bf, ts, nframes, + ath_tx_rc_status(sc, bf, ts, nframes, nbad, 0, false); ath_tx_complete_buf(sc, bf, txq, &bf_head, @@ -566,8 +563,11 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, rcu_read_unlock(); - if (needreset) + if (needreset) { + spin_unlock_bh(&sc->sc_pcu_lock); ath_reset(sc, false); + spin_lock_bh(&sc->sc_pcu_lock); + } } static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, @@ -856,7 +856,10 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, txtid->state |= AGGR_ADDBA_PROGRESS; txtid->paused = true; - *ssn = txtid->seq_start; + *ssn = txtid->seq_start = txtid->seq_next; + + memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf)); + txtid->baw_head = txtid->baw_tail = 0; return 0; } @@ -942,7 +945,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) [WME_AC_VI] = ATH_TXQ_AC_VI, [WME_AC_VO] = ATH_TXQ_AC_VO, }; - int qnum, i; + int axq_qnum, i; memset(&qi, 0, sizeof(qi)); qi.tqi_subtype = subtype_txq_to_hwq[subtype]; @@ -976,24 +979,25 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE; } - qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); - if (qnum == -1) { + axq_qnum = 
ath9k_hw_setuptxqueue(ah, qtype, &qi); + if (axq_qnum == -1) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues */ return NULL; } - if (qnum >= ARRAY_SIZE(sc->tx.txq)) { + if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) { ath_err(common, "qnum %u out of range, max %zu!\n", - qnum, ARRAY_SIZE(sc->tx.txq)); - ath9k_hw_releasetxqueue(ah, qnum); + axq_qnum, ARRAY_SIZE(sc->tx.txq)); + ath9k_hw_releasetxqueue(ah, axq_qnum); return NULL; } - if (!ATH_TXQ_SETUP(sc, qnum)) { - struct ath_txq *txq = &sc->tx.txq[qnum]; + if (!ATH_TXQ_SETUP(sc, axq_qnum)) { + struct ath_txq *txq = &sc->tx.txq[axq_qnum]; - txq->axq_qnum = qnum; + txq->axq_qnum = axq_qnum; + txq->mac80211_qnum = -1; txq->axq_link = NULL; INIT_LIST_HEAD(&txq->axq_q); INIT_LIST_HEAD(&txq->axq_acq); @@ -1001,14 +1005,14 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) txq->axq_depth = 0; txq->axq_ampdu_depth = 0; txq->axq_tx_inprogress = false; - sc->tx.txqsetup |= 1<<qnum; + sc->tx.txqsetup |= 1<<axq_qnum; txq->txq_headidx = txq->txq_tailidx = 0; for (i = 0; i < ATH_TXFIFO_DEPTH; i++) INIT_LIST_HEAD(&txq->txq_fifo[i]); INIT_LIST_HEAD(&txq->txq_fifo_pending); } - return &sc->tx.txq[qnum]; + return &sc->tx.txq[axq_qnum]; } int ath_txq_update(struct ath_softc *sc, int qnum, @@ -1051,6 +1055,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum, int ath_cabq_update(struct ath_softc *sc) { struct ath9k_tx_queue_info qi; + struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; int qnum = sc->beacon.cabq->axq_qnum; ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); @@ -1062,7 +1067,7 @@ int ath_cabq_update(struct ath_softc *sc) else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; - qi.tqi_readyTime = (sc->beacon_interval * + qi.tqi_readyTime = (cur_conf->beacon_interval * sc->config.cabqReadytime) / 100; ath_txq_update(sc, qnum, &qi); @@ -1189,24 +1194,31 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) if (sc->sc_flags & SC_OP_INVALID) return true; - /* Stop beacon queue */ - ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); + ath9k_hw_abort_tx_dma(ah); - /* Stop data queues */ + /* Check if any queue remains active */ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { - if (ATH_TXQ_SETUP(sc, i)) { - txq = &sc->tx.txq[i]; - ath9k_hw_stoptxdma(ah, txq->axq_qnum); - npend += ath9k_hw_numtxpending(ah, txq->axq_qnum); - } + if (!ATH_TXQ_SETUP(sc, i)) + continue; + + npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum); } if (npend) ath_err(common, "Failed to stop TX DMA!\n"); for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { - if (ATH_TXQ_SETUP(sc, i)) - ath_draintxq(sc, &sc->tx.txq[i], retry_tx); + if (!ATH_TXQ_SETUP(sc, i)) + continue; + + /* + * The caller will resume queues with ieee80211_wake_queues. + * Mark the queue as not stopped to prevent ath_tx_complete + * from waking the queue too early. + */ + txq = &sc->tx.txq[i]; + txq->stopped = false; + ath_draintxq(sc, txq, retry_tx); } return !npend; @@ -1218,46 +1230,59 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) sc->tx.txqsetup &= ~(1<<txq->axq_qnum); } +/* For each axq_acq entry, for each tid, try to schedule packets + * for transmit until ampdu_depth has reached min Q depth. 
+ */ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) { - struct ath_atx_ac *ac; - struct ath_atx_tid *tid; + struct ath_atx_ac *ac, *ac_tmp, *last_ac; + struct ath_atx_tid *tid, *last_tid; - if (list_empty(&txq->axq_acq)) + if (list_empty(&txq->axq_acq) || + txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) return; ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); - list_del(&ac->list); - ac->sched = false; + last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list); - do { - if (list_empty(&ac->tid_q)) - return; + list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { + last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list); + list_del(&ac->list); + ac->sched = false; - tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); - list_del(&tid->list); - tid->sched = false; + while (!list_empty(&ac->tid_q)) { + tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, + list); + list_del(&tid->list); + tid->sched = false; - if (tid->paused) - continue; + if (tid->paused) + continue; - ath_tx_sched_aggr(sc, txq, tid); + ath_tx_sched_aggr(sc, txq, tid); - /* - * add tid to round-robin queue if more frames - * are pending for the tid - */ - if (!list_empty(&tid->buf_q)) - ath_tx_queue_tid(txq, tid); + /* + * add tid to round-robin queue if more frames + * are pending for the tid + */ + if (!list_empty(&tid->buf_q)) + ath_tx_queue_tid(txq, tid); - break; - } while (!list_empty(&ac->tid_q)); + if (tid == last_tid || + txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) + break; + } - if (!list_empty(&ac->tid_q)) { - if (!ac->sched) { - ac->sched = true; - list_add_tail(&ac->list, &txq->axq_acq); + if (!list_empty(&ac->tid_q)) { + if (!ac->sched) { + ac->sched = true; + list_add_tail(&ac->list, &txq->axq_acq); + } } + + if (ac == last_ac || + txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) + return; } } @@ -1301,6 +1326,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]); list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]); INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); + TX_STAT_INC(txq->axq_qnum, puttxbuf); ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); @@ -1308,6 +1334,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, list_splice_tail_init(head, &txq->axq_q); if (txq->axq_link == NULL) { + TX_STAT_INC(txq->axq_qnum, puttxbuf); ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n", txq->axq_qnum, ito64(bf->bf_daddr), @@ -1321,6 +1348,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, } ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, &txq->axq_link); + TX_STAT_INC(txq->axq_qnum, txstart); ath9k_hw_txstart(ah, txq->axq_qnum); } txq->axq_depth++; @@ -1335,7 +1363,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, struct list_head bf_head; bf->bf_state.bf_type |= BUF_AMPDU; - TX_STAT_INC(txctl->txq->axq_qnum, a_queued); /* * Do not queue to h/w when any of the following conditions is true: @@ -1351,6 +1378,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, * Add this frame to software queue for scheduling later * for aggregation. 
*/ + TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); list_add_tail(&bf->list, &tid->buf_q); ath_tx_queue_tid(txctl->txq, tid); return; @@ -1364,6 +1392,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, ath_tx_addto_baw(sc, tid, fi->seqno); /* Queue to h/w without aggregation */ + TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); bf->bf_lastbf = bf; ath_buf_set_rate(sc, bf, fi->framelen); ath_tx_txqaddbuf(sc, txctl->txq, &bf_head); @@ -1416,8 +1445,7 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, int framelen) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = tx_info->control.sta; struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; @@ -1635,8 +1663,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_txq *txq, struct sk_buff *skb) { - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_frame_info *fi = get_frame_info(skb); @@ -1652,7 +1679,6 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw, ATH_TXBUF_RESET(bf); - bf->aphy = aphy; bf->bf_flags = setup_tx_flags(skb); bf->bf_mpdu = skb; @@ -1741,8 +1767,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = info->control.sta; - struct ath_wiphy *aphy = hw->priv; - struct ath_softc *sc = aphy->sc; + struct ath_softc *sc = hw->priv; struct ath_txq *txq = txctl->txq; struct ath_buf *bf; int padpos, padsize; @@ -1794,7 +1819,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, spin_lock_bh(&txq->axq_lock); if (txq == sc->tx.txq_map[q] && ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) { - ath_mac80211_stop_queue(sc, q); + ieee80211_stop_queue(sc->hw, q); txq->stopped = 1; } spin_unlock_bh(&txq->axq_lock); @@ -1809,8 +1834,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, /*****************/ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, - struct ath_wiphy *aphy, int tx_flags, int ftype, - struct ath_txq *txq) + int tx_flags, int ftype, struct ath_txq *txq) { struct ieee80211_hw *hw = sc->hw; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); @@ -1820,9 +1844,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); - if (aphy) - hw = aphy->hw; - if (tx_flags & ATH_TX_BAR) tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; @@ -1852,19 +1873,20 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, PS_WAIT_FOR_TX_ACK)); } - if (unlikely(ftype)) - ath9k_tx_status(hw, skb, ftype); - else { - q = skb_get_queue_mapping(skb); - if (txq == sc->tx.txq_map[q]) { - spin_lock_bh(&txq->axq_lock); - if (WARN_ON(--txq->pending_frames < 0)) - txq->pending_frames = 0; - spin_unlock_bh(&txq->axq_lock); - } + q = skb_get_queue_mapping(skb); + if (txq == sc->tx.txq_map[q]) { + spin_lock_bh(&txq->axq_lock); + if (WARN_ON(--txq->pending_frames < 0)) + txq->pending_frames = 0; - ieee80211_tx_status(hw, skb); + if (txq->stopped && txq->pending_frames < 
ATH_MAX_QDEPTH) { + ieee80211_wake_queue(sc->hw, q); + txq->stopped = 0; + } + spin_unlock_bh(&txq->axq_lock); } + + ieee80211_tx_status(hw, skb); } static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, @@ -1896,8 +1918,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, else complete(&sc->paprd_complete); } else { - ath_debug_stat_tx(sc, bf, ts); - ath_tx_complete(sc, skb, bf->aphy, tx_flags, + ath_debug_stat_tx(sc, bf, ts, txq); + ath_tx_complete(sc, skb, tx_flags, bf->bf_state.bfs_ftype, txq); } /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't @@ -1913,14 +1935,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, spin_unlock_irqrestore(&sc->tx.txbuflock, flags); } -static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, - int nframes, int nbad, int txok, bool update_rc) +static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, + struct ath_tx_status *ts, int nframes, int nbad, + int txok, bool update_rc) { struct sk_buff *skb = bf->bf_mpdu; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); - struct ieee80211_hw *hw = bf->aphy->hw; - struct ath_softc *sc = bf->aphy->sc; + struct ieee80211_hw *hw = sc->hw; struct ath_hw *ah = sc->sc_ah; u8 i, tx_rateindex; @@ -1971,19 +1993,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; } -static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum) -{ - struct ath_txq *txq; - - txq = sc->tx.txq_map[qnum]; - spin_lock_bh(&txq->axq_lock); - if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) { - if (ath_mac80211_start_queue(sc, qnum)) - txq->stopped = 0; - } - spin_unlock_bh(&txq->axq_lock); -} - static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) { struct ath_hw *ah = sc->sc_ah; @@ -1994,7 +2003,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) struct ath_tx_status ts; int txok; int status; - int qnum; ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), @@ -2004,6 +2012,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) spin_lock_bh(&txq->axq_lock); if (list_empty(&txq->axq_q)) { txq->axq_link = NULL; + if (sc->sc_flags & SC_OP_TXAGGR) + ath_txq_schedule(sc, txq); spin_unlock_bh(&txq->axq_lock); break; } @@ -2038,6 +2048,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) spin_unlock_bh(&txq->axq_lock); break; } + TX_STAT_INC(txq->axq_qnum, txprocdesc); /* * Remove ath_buf's of the same transmit unit from txq, @@ -2058,6 +2069,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) if (bf_is_ampdu_not_probing(bf)) txq->axq_ampdu_depth--; + spin_unlock_bh(&txq->axq_lock); if (bf_held) @@ -2070,27 +2082,45 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) */ if (ts.ts_status & ATH9K_TXERR_XRETRY) bf->bf_state.bf_type |= BUF_XRETRY; - ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true); + ath_tx_rc_status(sc, bf, &ts, 1, txok ? 
0 : 1, txok, true); } - qnum = skb_get_queue_mapping(bf->bf_mpdu); - if (bf_isampdu(bf)) ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok, true); else ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0); - if (txq == sc->tx.txq_map[qnum]) - ath_wake_mac80211_queue(sc, qnum); - spin_lock_bh(&txq->axq_lock); + if (sc->sc_flags & SC_OP_TXAGGR) ath_txq_schedule(sc, txq); spin_unlock_bh(&txq->axq_lock); } } +static void ath_hw_pll_work(struct work_struct *work) +{ + struct ath_softc *sc = container_of(work, struct ath_softc, + hw_pll_work.work); + static int count; + + if (AR_SREV_9485(sc->sc_ah)) { + if (ar9003_get_pll_sqsum_dvc(sc->sc_ah) >= 0x40000) { + count++; + + if (count == 3) { + /* Rx is hung for more than 500ms. Reset it */ + ath_reset(sc, true); + count = 0; + } + } else + count = 0; + + ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5); + } +} + static void ath_tx_complete_poll_work(struct work_struct *work) { struct ath_softc *sc = container_of(work, struct ath_softc, @@ -2098,6 +2128,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work) struct ath_txq *txq; int i; bool needreset = false; +#ifdef CONFIG_ATH9K_DEBUGFS + sc->tx_complete_poll_work_seen++; +#endif for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) { @@ -2111,6 +2144,33 @@ static void ath_tx_complete_poll_work(struct work_struct *work) } else { txq->axq_tx_inprogress = true; } + } else { + /* If the queue has pending buffers, then it + * should be doing tx work (and have axq_depth). + * Shouldn't get to this state I think..but + * we do. + */ + if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) && + (txq->pending_frames > 0 || + !list_empty(&txq->axq_acq) || + txq->stopped)) { + ath_err(ath9k_hw_common(sc->sc_ah), + "txq: %p axq_qnum: %u," + " mac80211_qnum: %i" + " axq_link: %p" + " pending frames: %i" + " axq_acq empty: %i" + " stopped: %i" + " axq_depth: 0 Attempting to" + " restart tx logic.\n", + txq, txq->axq_qnum, + txq->mac80211_qnum, + txq->axq_link, + txq->pending_frames, + list_empty(&txq->axq_acq), + txq->stopped); + ath_txq_schedule(sc, txq); + } } spin_unlock_bh(&txq->axq_lock); } @@ -2150,7 +2210,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) struct list_head bf_head; int status; int txok; - int qnum; for (;;) { status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs); @@ -2193,11 +2252,9 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) if (!bf_isampdu(bf)) { if (txs.ts_status & ATH9K_TXERR_XRETRY) bf->bf_state.bf_type |= BUF_XRETRY; - ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true); + ath_tx_rc_status(sc, bf, &txs, 1, txok ? 
0 : 1, txok, true); } - qnum = skb_get_queue_mapping(bf->bf_mpdu); - if (bf_isampdu(bf)) ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok, true); @@ -2205,19 +2262,19 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) ath_tx_complete_buf(sc, bf, txq, &bf_head, &txs, txok, 0); - if (txq == sc->tx.txq_map[qnum]) - ath_wake_mac80211_queue(sc, qnum); - spin_lock_bh(&txq->axq_lock); + if (!list_empty(&txq->txq_fifo_pending)) { INIT_LIST_HEAD(&bf_head); bf = list_first_entry(&txq->txq_fifo_pending, - struct ath_buf, list); - list_cut_position(&bf_head, &txq->txq_fifo_pending, - &bf->bf_lastbf->list); + struct ath_buf, list); + list_cut_position(&bf_head, + &txq->txq_fifo_pending, + &bf->bf_lastbf->list); ath_tx_txqaddbuf(sc, txq, &bf_head); } else if (sc->sc_flags & SC_OP_TXAGGR) ath_txq_schedule(sc, txq); + spin_unlock_bh(&txq->axq_lock); } } @@ -2285,6 +2342,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs) } INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); + INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work); if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { error = ath_tx_edma_init(sc); diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index d07ff7f2fd92..c6a5fae634a0 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -283,6 +283,7 @@ struct ar9170 { unsigned int mem_blocks; unsigned int mem_block_size; unsigned int rx_size; + unsigned int tx_seq_table; } fw; /* reset / stuck frames/queue detection */ @@ -533,7 +534,7 @@ void carl9170_rx(struct ar9170 *ar, void *buf, unsigned int len); void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len); /* TX */ -int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb); void carl9170_tx_janitor(struct work_struct *work); void carl9170_tx_process_status(struct ar9170 *ar, const struct carl9170_rsp *cmd); diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index 546b4e4ec5ea..9517ede9e2df 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -150,6 +150,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) const struct carl9170fw_otus_desc *otus_desc; const struct carl9170fw_chk_desc *chk_desc; const struct carl9170fw_last_desc *last_desc; + const struct carl9170fw_txsq_desc *txsq_desc; last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); @@ -264,6 +265,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) FIF_PROMISC_IN_BSS; } + if (SUPP(CARL9170FW_WOL)) + device_set_wakeup_enable(&ar->udev->dev, true); + ar->fw.vif_num = otus_desc->vif_num; ar->fw.cmd_bufs = otus_desc->cmd_bufs; ar->fw.address = le32_to_cpu(otus_desc->fw_address); @@ -296,6 +300,17 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) } } + txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, + sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER); + + if (txsq_desc) { + ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr); + if (!valid_cpu_addr(ar->fw.tx_seq_table)) + return -EINVAL; + } else { + ar->fw.tx_seq_table = 0; + } + #undef SUPPORTED return 0; } diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h index 3680dfc70f46..30449d21b762 100644 --- a/drivers/net/wireless/ath/carl9170/fwcmd.h +++ 
b/drivers/net/wireless/ath/carl9170/fwcmd.h @@ -167,6 +167,7 @@ struct carl9170_rx_filter_cmd { #define CARL9170_RX_FILTER_CTL_BACKR 0x20 #define CARL9170_RX_FILTER_MGMT 0x40 #define CARL9170_RX_FILTER_DATA 0x80 +#define CARL9170_RX_FILTER_EVERYTHING (~0) struct carl9170_bcn_ctrl_cmd { __le32 vif_id; diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h index 71f3821f6058..921066822dd5 100644 --- a/drivers/net/wireless/ath/carl9170/fwdesc.h +++ b/drivers/net/wireless/ath/carl9170/fwdesc.h @@ -69,6 +69,9 @@ enum carl9170fw_feature_list { /* Firmware RX filter | CARL9170_CMD_RX_FILTER */ CARL9170FW_RX_FILTER, + /* Wake up on WLAN */ + CARL9170FW_WOL, + /* KEEP LAST */ __CARL9170FW_FEATURE_NUM }; @@ -78,6 +81,7 @@ enum carl9170fw_feature_list { #define FIX_MAGIC "FIX\0" #define DBG_MAGIC "DBG\0" #define CHK_MAGIC "CHK\0" +#define TXSQ_MAGIC "TXSQ" #define LAST_MAGIC "LAST" #define CARL9170FW_SET_DAY(d) (((d) - 1) % 31) @@ -88,8 +92,10 @@ enum carl9170fw_feature_list { #define CARL9170FW_GET_MONTH(m) ((((m) / 31) % 12) + 1) #define CARL9170FW_GET_YEAR(y) ((y) / 372 + 10) +#define CARL9170FW_MAGIC_SIZE 4 + struct carl9170fw_desc_head { - u8 magic[4]; + u8 magic[CARL9170FW_MAGIC_SIZE]; __le16 length; u8 min_ver; u8 cur_ver; @@ -170,6 +176,16 @@ struct carl9170fw_chk_desc { #define CARL9170FW_CHK_DESC_SIZE \ (sizeof(struct carl9170fw_chk_desc)) +#define CARL9170FW_TXSQ_DESC_MIN_VER 1 +#define CARL9170FW_TXSQ_DESC_CUR_VER 1 +struct carl9170fw_txsq_desc { + struct carl9170fw_desc_head head; + + __le32 seq_table_addr; +} __packed; +#define CARL9170FW_TXSQ_DESC_SIZE \ + (sizeof(struct carl9170fw_txsq_desc)) + #define CARL9170FW_LAST_DESC_MIN_VER 1 #define CARL9170FW_LAST_DESC_CUR_VER 2 struct carl9170fw_last_desc { @@ -189,8 +205,8 @@ struct carl9170fw_last_desc { } static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, - u8 magic[4], __le16 length, - u8 min_ver, u8 cur_ver) + u8 magic[CARL9170FW_MAGIC_SIZE], + __le16 length, u8 min_ver, u8 cur_ver) { head->magic[0] = magic[0]; head->magic[1] = magic[1]; @@ -204,7 +220,7 @@ static inline void carl9170fw_fill_desc(struct carl9170fw_desc_head *head, #define carl9170fw_for_each_hdr(desc, fw_desc) \ for (desc = fw_desc; \ - memcmp(desc->magic, LAST_MAGIC, 4) && \ + memcmp(desc->magic, LAST_MAGIC, CARL9170FW_MAGIC_SIZE) && \ le16_to_cpu(desc->length) >= CARL9170FW_DESC_HEAD_SIZE && \ le16_to_cpu(desc->length) < CARL9170FW_DESC_MAX_LENGTH; \ desc = (void *)((unsigned long)desc + le16_to_cpu(desc->length))) @@ -218,8 +234,8 @@ static inline bool carl9170fw_supports(__le32 list, u8 feature) } static inline bool carl9170fw_desc_cmp(const struct carl9170fw_desc_head *head, - const u8 descid[4], u16 min_len, - u8 compatible_revision) + const u8 descid[CARL9170FW_MAGIC_SIZE], + u16 min_len, u8 compatible_revision) { if (descid[0] == head->magic[0] && descid[1] == head->magic[1] && descid[2] == head->magic[2] && descid[3] == head->magic[3] && diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h index e85df6edfed3..4e30762dd903 100644 --- a/drivers/net/wireless/ath/carl9170/hw.h +++ b/drivers/net/wireless/ath/carl9170/hw.h @@ -463,6 +463,8 @@ #define AR9170_PWR_REG_CHIP_REVISION (AR9170_PWR_REG_BASE + 0x010) #define AR9170_PWR_REG_PLL_ADDAC (AR9170_PWR_REG_BASE + 0x014) +#define AR9170_PWR_PLL_ADDAC_DIV_S 2 +#define AR9170_PWR_PLL_ADDAC_DIV 0xffc #define AR9170_PWR_REG_WATCH_DOG_MAGIC (AR9170_PWR_REG_BASE + 0x020) /* Faraday USB Controller */ @@ -471,6 +473,9 
@@ #define AR9170_USB_REG_MAIN_CTRL (AR9170_USB_REG_BASE + 0x000) #define AR9170_USB_MAIN_CTRL_REMOTE_WAKEUP BIT(0) #define AR9170_USB_MAIN_CTRL_ENABLE_GLOBAL_INT BIT(2) +#define AR9170_USB_MAIN_CTRL_GO_TO_SUSPEND BIT(3) +#define AR9170_USB_MAIN_CTRL_RESET BIT(4) +#define AR9170_USB_MAIN_CTRL_CHIP_ENABLE BIT(5) #define AR9170_USB_MAIN_CTRL_HIGHSPEED BIT(6) #define AR9170_USB_REG_DEVICE_ADDRESS (AR9170_USB_REG_BASE + 0x001) @@ -499,6 +504,13 @@ #define AR9170_USB_REG_INTR_GROUP (AR9170_USB_REG_BASE + 0x020) #define AR9170_USB_REG_INTR_SOURCE_0 (AR9170_USB_REG_BASE + 0x021) +#define AR9170_USB_INTR_SRC0_SETUP BIT(0) +#define AR9170_USB_INTR_SRC0_IN BIT(1) +#define AR9170_USB_INTR_SRC0_OUT BIT(2) +#define AR9170_USB_INTR_SRC0_FAIL BIT(3) /* ??? */ +#define AR9170_USB_INTR_SRC0_END BIT(4) /* ??? */ +#define AR9170_USB_INTR_SRC0_ABORT BIT(7) + #define AR9170_USB_REG_INTR_SOURCE_1 (AR9170_USB_REG_BASE + 0x022) #define AR9170_USB_REG_INTR_SOURCE_2 (AR9170_USB_REG_BASE + 0x023) #define AR9170_USB_REG_INTR_SOURCE_3 (AR9170_USB_REG_BASE + 0x024) @@ -506,6 +518,15 @@ #define AR9170_USB_REG_INTR_SOURCE_5 (AR9170_USB_REG_BASE + 0x026) #define AR9170_USB_REG_INTR_SOURCE_6 (AR9170_USB_REG_BASE + 0x027) #define AR9170_USB_REG_INTR_SOURCE_7 (AR9170_USB_REG_BASE + 0x028) +#define AR9170_USB_INTR_SRC7_USB_RESET BIT(1) +#define AR9170_USB_INTR_SRC7_USB_SUSPEND BIT(2) +#define AR9170_USB_INTR_SRC7_USB_RESUME BIT(3) +#define AR9170_USB_INTR_SRC7_ISO_SEQ_ERR BIT(4) +#define AR9170_USB_INTR_SRC7_ISO_SEQ_ABORT BIT(5) +#define AR9170_USB_INTR_SRC7_TX0BYTE BIT(6) +#define AR9170_USB_INTR_SRC7_RX0BYTE BIT(7) + +#define AR9170_USB_REG_IDLE_COUNT (AR9170_USB_REG_BASE + 0x02f) #define AR9170_USB_REG_EP_MAP (AR9170_USB_REG_BASE + 0x030) #define AR9170_USB_REG_EP1_MAP (AR9170_USB_REG_BASE + 0x030) @@ -581,6 +602,10 @@ #define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110) #define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114) + +#define AR9170_USB_REG_WAKE_UP (AR9170_USB_REG_BASE + 0x120) +#define AR9170_USB_WAKE_UP_WAKE BIT(0) + #define AR9170_USB_REG_CBUS_CTRL (AR9170_USB_REG_BASE + 0x1f0) #define AR9170_USB_CBUS_CTRL_BUFFER_END (BIT(1)) diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 870df8c42622..ede3d7e5a048 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -662,6 +662,13 @@ init: goto unlock; } + if (ar->fw.tx_seq_table) { + err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4, + 0); + if (err) + goto unlock; + } + unlock: if (err && (vif_id >= 0)) { vif_priv->active = false; @@ -1279,7 +1286,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, - u16 tid, u16 *ssn) + u16 tid, u16 *ssn, u8 buf_size) { struct ar9170 *ar = hw->priv; struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 6cc58e052d10..0ef70b6fc512 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -862,6 +862,9 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)) txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB; + if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) + txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ; + if 
(unlikely(ieee80211_is_probe_resp(hdr->frame_control))) txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF; @@ -1336,7 +1339,7 @@ err_unlock_rcu: return false; } -int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ar9170 *ar = hw->priv; struct ieee80211_tx_info *info; @@ -1370,12 +1373,11 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) } carl9170_tx(ar); - return NETDEV_TX_OK; + return; err_free: ar->tx_dropped++; dev_kfree_skb_any(skb); - return NETDEV_TX_OK; } void carl9170_tx_scheduler(struct ar9170 *ar) diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h index ee0f84f2a2f6..15095c035169 100644 --- a/drivers/net/wireless/ath/carl9170/version.h +++ b/drivers/net/wireless/ath/carl9170/version.h @@ -1,7 +1,7 @@ #ifndef __CARL9170_SHARED_VERSION_H #define __CARL9170_SHARED_VERSION_H -#define CARL9170FW_VERSION_YEAR 10 -#define CARL9170FW_VERSION_MONTH 10 -#define CARL9170FW_VERSION_DAY 29 -#define CARL9170FW_VERSION_GIT "1.9.0" +#define CARL9170FW_VERSION_YEAR 11 +#define CARL9170FW_VERSION_MONTH 1 +#define CARL9170FW_VERSION_DAY 22 +#define CARL9170FW_VERSION_GIT "1.9.2" #endif /* __CARL9170_SHARED_VERSION_H */ diff --git a/drivers/net/wireless/ath/carl9170/wlan.h b/drivers/net/wireless/ath/carl9170/wlan.h index 24d63b583b6b..9e1324b67e08 100644 --- a/drivers/net/wireless/ath/carl9170/wlan.h +++ b/drivers/net/wireless/ath/carl9170/wlan.h @@ -251,7 +251,7 @@ struct carl9170_tx_superdesc { u8 ampdu_commit_factor:1; u8 ampdu_unused_bit:1; u8 queue:2; - u8 reserved:1; + u8 assign_seq:1; u8 vif_id:3; u8 fill_in_tsf:1; u8 cab:1; @@ -299,6 +299,7 @@ struct _ar9170_tx_hwdesc { #define CARL9170_TX_SUPER_MISC_QUEUE 0x3 #define CARL9170_TX_SUPER_MISC_QUEUE_S 0 +#define CARL9170_TX_SUPER_MISC_ASSIGN_SEQ 0x4 #define CARL9170_TX_SUPER_MISC_VIF_ID 0x38 #define CARL9170_TX_SUPER_MISC_VIF_ID_S 3 #define CARL9170_TX_SUPER_MISC_FILL_IN_TSF 0x40 @@ -413,6 +414,23 @@ enum ar9170_txq { __AR9170_NUM_TXQ, }; +/* + * This is an workaround for several undocumented bugs. + * Don't mess with the QoS/AC <-> HW Queue map, if you don't + * know what you are doing. + * + * Known problems [hardware]: + * * The MAC does not aggregate frames on anything other + * than the first HW queue. + * * when an AMPDU is placed [in the first hw queue] and + * additional frames are already queued on a different + * hw queue, the MAC will ALWAYS freeze. + * + * In a nutshell: The hardware can either do QoS or + * Aggregation but not both at the same time. As a + * result, this makes the device pretty much useless + * for any serious 802.11n setup. 
+ */ static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 2, 1, 0, 3 }; #define AR9170_TXQ_DEPTH 32 diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c index 5d465e5fcf24..37b8e115375a 100644 --- a/drivers/net/wireless/ath/key.c +++ b/drivers/net/wireless/ath/key.c @@ -58,8 +58,11 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry) REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0); REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); - if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) + if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED) { REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), + AR_KEYTABLE_TYPE_CLR); + } } diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 2b14775e6bc6..f828f294ba89 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -158,6 +158,13 @@ ieee80211_regdomain *ath_world_regdomain(struct ath_regulatory *reg) } } +bool ath_is_49ghz_allowed(u16 regdomain) +{ + /* possibly more */ + return regdomain == MKK9_MKKC; +} +EXPORT_SYMBOL(ath_is_49ghz_allowed); + /* Frequency is one where radar detection is required */ static bool ath_is_radar_freq(u16 center_freq) { diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index 345dd9721b41..172f63f671cf 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -250,6 +250,7 @@ enum CountryCode { }; bool ath_is_world_regd(struct ath_regulatory *reg); +bool ath_is_49ghz_allowed(u16 redomain); int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy, int (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)); diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig index 47033f6a1c2b..480595f04411 100644 --- a/drivers/net/wireless/b43/Kconfig +++ b/drivers/net/wireless/b43/Kconfig @@ -92,7 +92,7 @@ config B43_PHY_N ---help--- Support for the N-PHY. - This enables support for devices with N-PHY revision up to 2. + This enables support for devices with N-PHY. Say N if you expect high stability and performance. Saying Y will not affect other devices support and may provide support for basic needs. diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 22bc9f17f634..57eb5b649730 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -3203,7 +3203,7 @@ static void b43_tx_work(struct work_struct *work) mutex_unlock(&wl->mutex); } -static int b43_op_tx(struct ieee80211_hw *hw, +static void b43_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct b43_wl *wl = hw_to_b43_wl(hw); @@ -3211,14 +3211,12 @@ static int b43_op_tx(struct ieee80211_hw *hw, if (unlikely(skb->len < 2 + 2 + 6)) { /* Too short, this can't be a valid frame. 
*/ dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + return; } B43_WARN_ON(skb_shinfo(skb)->nr_frags); skb_queue_tail(&wl->tx_queue, skb); ieee80211_queue_work(wl->hw, &wl->tx_work); - - return NETDEV_TX_OK; } static void b43_qos_params_upload(struct b43_wldev *dev, diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index ab81ed8b19d7..8a00f9a95dbb 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -430,9 +430,9 @@ static void b43_radio_init2055_post(struct b43_wldev *dev) bool workaround = false; if (sprom->revision < 4) - workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM || - binfo->type != 0x46D || - binfo->rev < 0x41); + workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM && + binfo->type == 0x46D && + binfo->rev >= 0x41); else workaround = !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS); @@ -1168,23 +1168,98 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; + struct ssb_sprom *sprom = &(dev->dev->bus->sprom); + + /* PHY rev 0, 1, 2 */ u8 i, j; u8 code; u16 tmp; + u8 rfseq_events[3] = { 6, 8, 7 }; + u8 rfseq_delays[3] = { 10, 30, 1 }; - /* TODO: for PHY >= 3 - s8 *lna1_gain, *lna2_gain; - u8 *gain_db, *gain_bits; - u16 *rfseq_init; + /* PHY rev >= 3 */ + bool ghz5; + bool ext_lna; + u16 rssi_gain; + struct nphy_gain_ctl_workaround_entry *e; u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; - */ - - u8 rfseq_events[3] = { 6, 8, 7 }; - u8 rfseq_delays[3] = { 10, 30, 1 }; if (dev->phy.rev >= 3) { - /* TODO */ + /* Prepare values */ + ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL) + & B43_NPHY_BANDCTL_5GHZ; + ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA; + e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna); + if (ghz5 && dev->phy.rev >= 5) + rssi_gain = 0x90; + else + rssi_gain = 0x50; + + b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040); + + /* Set Clip 2 detect */ + b43_phy_set(dev, B43_NPHY_C1_CGAINI, + B43_NPHY_C1_CGAINI_CL2DETECT); + b43_phy_set(dev, B43_NPHY_C2_CGAINI, + B43_NPHY_C2_CGAINI_CL2DETECT); + + b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0); + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0); + b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00); + b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00); + b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN, + rssi_gain); + b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN, + rssi_gain); + b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC, + 0x17); + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF); + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF); + + b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db); + b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, 
lpf_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain); + b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits); + b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits); + + b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); + b43_phy_write(dev, 0x2A7, e->init_gain); + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, + e->rfseq_init); + b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); + + /* TODO: check defines. Do not match variables names */ + b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain); + b43_phy_write(dev, 0x2A9, e->cliphi_gain); + b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain); + b43_phy_write(dev, 0x2AB, e->clipmd_gain); + b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain); + b43_phy_write(dev, 0x2AD, e->cliplo_gain); + + b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin); + b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl); + b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu); + b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip); + b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip); + b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, + ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip); + b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, + ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip); + b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); } else { /* Set Clip 2 detect */ b43_phy_set(dev, B43_NPHY_C1_CGAINI, @@ -1281,17 +1356,17 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) B43_NPHY_TABLE_DATALO, tmp); } } + } - b43_nphy_set_rf_sequence(dev, 5, - rfseq_events, rfseq_delays, 3); - b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, - ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, - 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); + b43_nphy_set_rf_sequence(dev, 5, + rfseq_events, rfseq_delays, 3); + b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1, + ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF, + 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT); - if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) - b43_phy_maskset(dev, B43_PHY_N(0xC5D), - 0xFF80, 4); - } + if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) + b43_phy_maskset(dev, B43_PHY_N(0xC5D), + 0xFF80, 4); } } @@ -1308,6 +1383,9 @@ static void b43_nphy_workarounds(struct b43_wldev *dev) u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; + u16 tmp16; + u32 tmp32; + if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) b43_nphy_classifier(dev, 1, 0); else @@ -1320,7 +1398,82 @@ static void b43_nphy_workarounds(struct b43_wldev *dev) B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); if (dev->phy.rev >= 3) { + tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); + tmp32 &= 0xffffff; + b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32); + + b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125); + b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3); + b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105); + b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E); + b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD); + b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020); + + b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C); + b43_phy_write(dev, 0x2AE, 0x000C); + /* TODO */ + + tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 
+ 0x2 : 0x9C40; + b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16); + + b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700); + + b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); + b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); + + b43_nphy_gain_ctrl_workarounds(dev); + + b43_ntab_write(dev, B43_NTAB32(8, 0), 2); + b43_ntab_write(dev, B43_NTAB32(8, 16), 2); + + /* TODO */ + + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88); + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00); + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00); + + /* N PHY WAR TX Chain Update with hw_phytxchain as argument */ + + if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR && + b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || + (bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR && + b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) + tmp32 = 0x00088888; + else + tmp32 = 0x88888888; + b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32); + b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32); + b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32); + + if (dev->phy.rev == 4 && + b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { + b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, + 0x70); + b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, + 0x70); + } + + b43_phy_write(dev, 0x224, 0x039C); + b43_phy_write(dev, 0x225, 0x0357); + b43_phy_write(dev, 0x226, 0x0317); + b43_phy_write(dev, 0x227, 0x02D7); + b43_phy_write(dev, 0x228, 0x039C); + b43_phy_write(dev, 0x229, 0x0357); + b43_phy_write(dev, 0x22A, 0x0317); + b43_phy_write(dev, 0x22B, 0x02D7); + b43_phy_write(dev, 0x22C, 0x039C); + b43_phy_write(dev, 0x22D, 0x0357); + b43_phy_write(dev, 0x22E, 0x0317); + b43_phy_write(dev, 0x22F, 0x02D7); } else { if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && nphy->band5g_pwrgain) { @@ -2128,7 +2281,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf, save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); - } else if (dev->phy.rev == 2) { + } else { save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1); save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2); save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); @@ -2179,7 +2332,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf, b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); - } else if (dev->phy.rev == 2) { + } else { b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]); b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]); b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]); @@ -3878,10 +4031,14 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, } } +/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */ static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) { - b43_phy_write(dev, B43_NPHY_AFECTL_OVER, - on ? 
0 : 0x7FFF); + u16 val = on ? 0 : 0x7FFF; + + if (dev->phy.rev >= 3) + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, val); + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, val); } static int b43_nphy_op_switch_channel(struct b43_wldev *dev, diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c index dc8ef09a8552..2de483b3d3ba 100644 --- a/drivers/net/wireless/b43/tables_nphy.c +++ b/drivers/net/wireless/b43/tables_nphy.c @@ -1097,6 +1097,1080 @@ static const u32 b43_ntab_tmap[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; +/* static tables, PHY revision >= 3 */ +static const u32 b43_ntab_framestruct_r3[] = { + 0x08004a04, 0x00100000, 0x01000a05, 0x00100020, + 0x09804506, 0x00100030, 0x09804507, 0x00100030, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004a0c, 0x00100004, 0x01000a0d, 0x00100024, + 0x0980450e, 0x00100034, 0x0980450f, 0x00100034, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000a04, 0x00100000, 0x11008a05, 0x00100020, + 0x1980c506, 0x00100030, 0x21810506, 0x00100030, + 0x21810506, 0x00100030, 0x01800504, 0x00100030, + 0x11808505, 0x00100030, 0x29814507, 0x01100030, + 0x00000a04, 0x00100000, 0x11008a05, 0x00100020, + 0x21810506, 0x00100030, 0x21810506, 0x00100030, + 0x29814507, 0x01100030, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028, + 0x1980c50e, 0x00100038, 0x2181050e, 0x00100038, + 0x2181050e, 0x00100038, 0x0180050c, 0x00100038, + 0x1180850d, 0x00100038, 0x2981450f, 0x01100038, + 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028, + 0x2181050e, 0x00100038, 0x2181050e, 0x00100038, + 0x2981450f, 0x01100038, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004a04, 0x00100000, 0x01000a05, 0x00100020, + 0x1980c506, 0x00100030, 0x1980c506, 0x00100030, + 0x11808504, 0x00100030, 0x3981ca05, 0x00100030, + 0x29814507, 0x01100030, 0x00000000, 0x00000000, + 0x10008a04, 0x00100000, 0x3981ca05, 0x00100030, + 0x1980c506, 0x00100030, 0x29814507, 0x01100030, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004a0c, 0x00100008, 0x01000a0d, 0x00100028, + 0x1980c50e, 0x00100038, 0x1980c50e, 0x00100038, + 0x1180850c, 0x00100038, 0x3981ca0d, 0x00100038, + 0x2981450f, 0x01100038, 0x00000000, 0x00000000, + 0x10008a0c, 0x00100008, 0x3981ca0d, 0x00100038, + 0x1980c50e, 0x00100038, 0x2981450f, 0x01100038, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x40021404, 0x00100000, 0x02001405, 0x00100040, + 0x0b004a06, 0x01900060, 0x13008a06, 0x01900060, + 0x13008a06, 0x01900060, 0x43020a04, 0x00100060, + 0x1b00ca05, 0x00100060, 0x23010a07, 0x01500060, + 0x40021404, 0x00100000, 0x1a00d405, 0x00100040, + 0x13008a06, 0x01900060, 0x13008a06, 0x01900060, + 0x23010a07, 0x01500060, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140c, 0x00100010, 0x0200140d, 0x00100050, + 0x0b004a0e, 0x01900070, 0x13008a0e, 0x01900070, + 0x13008a0e, 0x01900070, 0x43020a0c, 0x00100070, + 0x1b00ca0d, 0x00100070, 0x23010a0f, 0x01500070, + 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050, + 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070, + 0x23010a0f, 0x01500070, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x50029404, 0x00100000, 0x32019405, 0x00100040, + 0x0b004a06, 0x01900060, 0x0b004a06, 0x01900060, 
+ 0x5b02ca04, 0x00100060, 0x3b01d405, 0x00100060, + 0x23010a07, 0x01500060, 0x00000000, 0x00000000, + 0x5802d404, 0x00100000, 0x3b01d405, 0x00100060, + 0x0b004a06, 0x01900060, 0x23010a07, 0x01500060, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x5002940c, 0x00100010, 0x3201940d, 0x00100050, + 0x0b004a0e, 0x01900070, 0x0b004a0e, 0x01900070, + 0x5b02ca0c, 0x00100070, 0x3b01d40d, 0x00100070, + 0x23010a0f, 0x01500070, 0x00000000, 0x00000000, + 0x5802d40c, 0x00100010, 0x3b01d40d, 0x00100070, + 0x0b004a0e, 0x01900070, 0x23010a0f, 0x01500070, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x40021404, 0x000f4800, 0x62031405, 0x00100040, + 0x53028a06, 0x01900060, 0x53028a07, 0x01900060, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140c, 0x000f4808, 0x6203140d, 0x00100048, + 0x53028a0e, 0x01900068, 0x53028a0f, 0x01900068, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000a0c, 0x00100004, 0x11008a0d, 0x00100024, + 0x1980c50e, 0x00100034, 0x2181050e, 0x00100034, + 0x2181050e, 0x00100034, 0x0180050c, 0x00100038, + 0x1180850d, 0x00100038, 0x1181850d, 0x00100038, + 0x2981450f, 0x01100038, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028, + 0x2181050e, 0x00100038, 0x2181050e, 0x00100038, + 0x1181850d, 0x00100038, 0x2981450f, 0x01100038, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x08004a04, 0x00100000, 0x01000a05, 0x00100020, + 0x0180c506, 0x00100030, 0x0180c506, 0x00100030, + 0x2180c50c, 0x00100030, 0x49820a0d, 0x0016a130, + 0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x2000ca0c, 0x00100000, 0x49820a0d, 0x0016a130, + 0x1980c50e, 0x00100030, 0x41824a0d, 0x0016a130, + 0x2981450f, 0x01100030, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140c, 0x00100008, 0x0200140d, 0x00100048, + 0x0b004a0e, 0x01900068, 0x13008a0e, 0x01900068, + 0x13008a0e, 0x01900068, 0x43020a0c, 0x00100070, + 0x1b00ca0d, 0x00100070, 0x1b014a0d, 0x00100070, + 0x23010a0f, 0x01500070, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050, + 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070, + 0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x50029404, 0x00100000, 0x32019405, 0x00100040, + 
0x03004a06, 0x01900060, 0x03004a06, 0x01900060, + 0x6b030a0c, 0x00100060, 0x4b02140d, 0x0016a160, + 0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x6b03140c, 0x00100060, 0x4b02140d, 0x0016a160, + 0x0b004a0e, 0x01900060, 0x4302540d, 0x0016a160, + 0x23010a0f, 0x01500060, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x40021404, 0x00100000, 0x1a00d405, 0x00100040, + 0x53028a06, 0x01900060, 0x5b02ca06, 0x01900060, + 0x5b02ca06, 0x01900060, 0x43020a04, 0x00100060, + 0x1b00ca05, 0x00100060, 0x53028a07, 0x0190c060, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050, + 0x53028a0e, 0x01900070, 0x5b02ca0e, 0x01900070, + 0x5b02ca0e, 0x01900070, 0x43020a0c, 0x00100070, + 0x1b00ca0d, 0x00100070, 0x53028a0f, 0x0190c070, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x40021404, 0x00100000, 0x1a00d405, 0x00100040, + 0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060, + 0x53028a07, 0x0190c060, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050, + 0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070, + 0x53028a0f, 0x0190c070, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static const u16 b43_ntab_pilot_r3[] = { + 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, + 0xff08, 0xff08, 0x80d5, 0x80d5, 0x80d5, 0x80d5, + 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0xff0a, 0xff82, + 0xffa0, 0xff28, 0xffff, 0xffff, 0xffff, 0xffff, + 0xff82, 0xffa0, 0xff28, 0xff0a, 0xffff, 0xffff, + 0xffff, 0xffff, 0xf83f, 0xfa1f, 0xfa97, 0xfab5, + 0xf2bd, 0xf0bf, 0xffff, 0xffff, 0xf017, 0xf815, + 0xf215, 0xf095, 0xf035, 0xf01d, 0xffff, 0xffff, + 0xff08, 0xff02, 0xff80, 0xff20, 0xff08, 0xff02, + 0xff80, 0xff20, 0xf01f, 0xf817, 0xfa15, 0xf295, + 0xf0b5, 0xf03d, 0xffff, 0xffff, 0xf82a, 0xfa0a, + 0xfa82, 0xfaa0, 0xf2a8, 0xf0aa, 0xffff, 0xffff, + 0xf002, 0xf800, 0xf200, 0xf080, 0xf020, 0xf008, + 0xffff, 0xffff, 0xf00a, 0xf802, 0xfa00, 0xf280, + 0xf0a0, 0xf028, 0xffff, 0xffff, +}; + +static const u32 b43_ntab_tmap_r3[] = { + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, + 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0xf1111110, 0x11111111, 0x11f11111, 0x00000111, + 0x11000000, 0x1111f111, 0x11111111, 0x111111f1, + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888, + 0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0xa1111110, 0x11111111, 0x11c11111, 0x00000111, + 0x11000000, 0x1111a111, 0x11111111, 0x111111a1, + 0xa2222220, 0x22222222, 0x22c22222, 0x00000222, + 0x22000000, 0x2222a222, 0x22222222, 0x222222a2, + 0xf1111110, 0x11111111, 0x11f11111, 0x00011111, + 0x11110000, 0x1111f111, 0x11111111, 0x111111f1, + 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa, + 0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a, + 0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88, + 0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888, + 0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808, + 0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08, + 0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080, + 0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9, + 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999, + 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888, + 0x22000000, 0x2222b222, 0x22222222, 0x222222b2, + 0xb2222220, 0x22222222, 0x22d22222, 0x00000222, + 0x11000000, 0x1111a111, 0x11111111, 0x111111a1, + 0xa1111110, 0x11111111, 0x11c11111, 0x00000111, + 0x33000000, 0x3333b333, 0x33333333, 0x333333b3, + 0xb3333330, 0x33333333, 0x33d33333, 0x00000333, + 0x22000000, 0x2222a222, 0x22222222, 0x222222a2, + 0xa2222220, 0x22222222, 0x22c22222, 0x00000222, + 0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9, + 0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999, + 0x88000000, 0x8a8a88aa, 
0x8aa88888, 0x8888a8a8, + 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888, + 0x22222200, 0x2222f222, 0x22222222, 0x222222f2, + 0x22222222, 0x22222222, 0x22f22222, 0x00000222, + 0x11000000, 0x1111f111, 0x11111111, 0x11111111, + 0xf1111111, 0x11111111, 0x11f11111, 0x01111111, + 0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b, + 0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb, + 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a, + 0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa, + 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a, + 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa, + 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, + 0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb, + 0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999, + 0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88, + 0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a, + 0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b, + 0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909, + 0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08, + 0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a, + 0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090, + 0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090, + 0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080, + 0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0, + 0x22000000, 0x2222f222, 0x22222222, 0x222222f2, + 0xf2222220, 0x22222222, 0x22f22222, 0x00000222, + 0x11000000, 0x1111f111, 0x11111111, 0x111111f1, + 0xf1111110, 0x11111111, 0x11f11111, 0x00000111, + 0x33000000, 0x3333f333, 0x33333333, 0x333333f3, + 0xf3333330, 0x33333333, 0x33f33333, 0x00000333, + 0x22000000, 0x2222f222, 0x22222222, 0x222222f2, + 0xf2222220, 0x22222222, 0x22f22222, 0x00000222, + 0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9, + 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999, + 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, + 0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, + 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888, + 0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, + 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888, + 0x11000000, 0x1111a111, 0x11111111, 0x111111a1, + 0xa1111110, 0x11111111, 0x11c11111, 0x00000111, + 0x11000000, 0x1111a111, 0x11111111, 0x111111a1, + 0xa1111110, 0x11111111, 0x11c11111, 0x00000111, + 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, + 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, + 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_intlevel_r3[] = { + 0x00802070, 0x0671188d, 0x0a60192c, 0x0a300e46, + 0x00c1188d, 0x080024d2, 0x00000070, +}; + +static const u32 b43_ntab_tdtrn_r3[] = { + 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6, + 0x00000c38, 0xfe5212f6, 0xf592fe36, 
0x0050ee68, + 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52, + 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050, + 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6, + 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68, + 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52, + 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050, + 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246, + 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c, + 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61, + 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d, + 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246, + 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c, + 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61, + 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d, + 0xfa58fa58, 0xf895043b, 0xff4c09c0, 0xfbc6ffa8, + 0xfb84f384, 0x0798f6f9, 0x05760122, 0x058409f6, + 0x0b500000, 0x05b7f542, 0x08860432, 0x06ddfee7, + 0xfb84f384, 0xf9d90664, 0xf7e8025c, 0x00fff7bd, + 0x05a805a8, 0xf7bd00ff, 0x025cf7e8, 0x0664f9d9, + 0xf384fb84, 0xfee706dd, 0x04320886, 0xf54205b7, + 0x00000b50, 0x09f60584, 0x01220576, 0xf6f90798, + 0xf384fb84, 0xffa8fbc6, 0x09c0ff4c, 0x043bf895, + 0x02d402d4, 0x07de0270, 0xfc96079c, 0xf90afe94, + 0xfe00ff2c, 0x02d4065d, 0x092a0096, 0x0014fbb8, + 0xfd2cfd2c, 0x076afb3c, 0x0096f752, 0xf991fd87, + 0xfb2c0200, 0xfeb8f960, 0x08e0fc96, 0x049802a8, + 0xfd2cfd2c, 0x02a80498, 0xfc9608e0, 0xf960feb8, + 0x0200fb2c, 0xfd87f991, 0xf7520096, 0xfb3c076a, + 0xfd2cfd2c, 0xfbb80014, 0x0096092a, 0x065d02d4, + 0xff2cfe00, 0xfe94f90a, 0x079cfc96, 0x027007de, + 0x02d402d4, 0x027007de, 0x079cfc96, 0xfe94f90a, + 0xff2cfe00, 0x065d02d4, 0x0096092a, 0xfbb80014, + 0xfd2cfd2c, 0xfb3c076a, 0xf7520096, 0xfd87f991, + 0x0200fb2c, 0xf960feb8, 0xfc9608e0, 0x02a80498, + 0xfd2cfd2c, 0x049802a8, 0x08e0fc96, 0xfeb8f960, + 0xfb2c0200, 0xf991fd87, 0x0096f752, 0x076afb3c, + 0xfd2cfd2c, 0x0014fbb8, 0x092a0096, 0x02d4065d, + 0xfe00ff2c, 0xf90afe94, 0xfc96079c, 0x07de0270, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d, + 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf, + 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8, + 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7, + 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3, + 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841, + 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608, + 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759, + 0x062a0000, 0xfefa0759, 0x08b80908, 
0xf396fc2d, + 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf, + 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8, + 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7, + 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3, + 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841, + 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608, + 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759, + 0x061c061c, 0xff30009d, 0xffb21141, 0xfd87fb54, + 0xf65dfe59, 0x02eef99e, 0x0166f03c, 0xfff809b6, + 0x000008a4, 0x000af42b, 0x00eff577, 0xfa840bf2, + 0xfc02ff51, 0x08260f67, 0xfff0036f, 0x0842f9c3, + 0x00000000, 0x063df7be, 0xfc910010, 0xf099f7da, + 0x00af03fe, 0xf40e057c, 0x0a89ff11, 0x0bd5fff6, + 0xf75c0000, 0xf64a0008, 0x0fc4fe9a, 0x0662fd12, + 0x01a709a3, 0x04ac0279, 0xeebf004e, 0xff6300d0, + 0xf9e4f9e4, 0x00d0ff63, 0x004eeebf, 0x027904ac, + 0x09a301a7, 0xfd120662, 0xfe9a0fc4, 0x0008f64a, + 0x0000f75c, 0xfff60bd5, 0xff110a89, 0x057cf40e, + 0x03fe00af, 0xf7daf099, 0x0010fc91, 0xf7be063d, + 0x00000000, 0xf9c30842, 0x036ffff0, 0x0f670826, + 0xff51fc02, 0x0bf2fa84, 0xf57700ef, 0xf42b000a, + 0x08a40000, 0x09b6fff8, 0xf03c0166, 0xf99e02ee, + 0xfe59f65d, 0xfb54fd87, 0x1141ffb2, 0x009dff30, + 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59, + 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766, + 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986, + 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb, + 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7, + 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a, + 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a, + 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705, + 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59, + 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766, + 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986, + 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb, + 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7, + 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a, + 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a, + 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705, + 0xfa58fa58, 0xf8f0fe00, 0x0448073d, 0xfdc9fe46, + 0xf9910258, 0x089d0407, 0xfd5cf71a, 0x02affde0, + 0x083e0496, 0xff5a0740, 0xff7afd97, 0x00fe01f1, + 0x0009082e, 0xfa94ff75, 0xfecdf8ea, 0xffb0f693, + 0xfd2cfa58, 0x0433ff16, 0xfba405dd, 0xfa610341, + 0x06a606cb, 0x0039fd2d, 0x0677fa97, 0x01fa05e0, + 0xf896003e, 0x075a068b, 0x012cfc3e, 0xfa23f98d, + 0xfc7cfd43, 0xff90fc0d, 0x01c10982, 0x00c601d6, + 0xfd2cfd2c, 0x01d600c6, 0x098201c1, 0xfc0dff90, + 0xfd43fc7c, 0xf98dfa23, 0xfc3e012c, 0x068b075a, + 0x003ef896, 0x05e001fa, 0xfa970677, 0xfd2d0039, + 0x06cb06a6, 0x0341fa61, 0x05ddfba4, 0xff160433, + 0xfa58fd2c, 0xf693ffb0, 0xf8eafecd, 0xff75fa94, + 0x082e0009, 0x01f100fe, 0xfd97ff7a, 0x0740ff5a, + 0x0496083e, 0xfde002af, 0xf71afd5c, 0x0407089d, + 0x0258f991, 0xfe46fdc9, 0x073d0448, 0xfe00f8f0, + 0xfd2cfd2c, 0xfce00500, 0xfc09fddc, 0xfe680157, + 0x04c70571, 0xfc3aff21, 0xfcd70228, 0x056d0277, + 0x0200fe00, 0x0022f927, 0xfe3c032b, 0xfc44ff3c, + 0x03e9fbdb, 0x04570313, 0x04c9ff5c, 0x000d03b8, + 0xfa580000, 0xfbe900d2, 0xf9d0fe0b, 0x0125fdf9, + 0x042501bf, 0x0328fa2b, 0xffa902f0, 0xfa250157, + 0x0200fe00, 0x03740438, 0xff0405fd, 0x030cfe52, + 0x0037fb39, 0xff6904c5, 0x04f8fd23, 0xfd31fc1b, + 0xfd2cfd2c, 0xfc1bfd31, 0xfd2304f8, 0x04c5ff69, + 0xfb390037, 0xfe52030c, 0x05fdff04, 0x04380374, + 0xfe000200, 0x0157fa25, 0x02f0ffa9, 0xfa2b0328, + 0x01bf0425, 0xfdf90125, 0xfe0bf9d0, 0x00d2fbe9, + 0x0000fa58, 0x03b8000d, 0xff5c04c9, 0x03130457, + 0xfbdb03e9, 0xff3cfc44, 0x032bfe3c, 0xf9270022, + 0xfe000200, 0x0277056d, 0x0228fcd7, 0xff21fc3a, + 0x057104c7, 0x0157fe68, 0xfddcfc09, 
0x0500fce0, + 0xfd2cfd2c, 0x0500fce0, 0xfddcfc09, 0x0157fe68, + 0x057104c7, 0xff21fc3a, 0x0228fcd7, 0x0277056d, + 0xfe000200, 0xf9270022, 0x032bfe3c, 0xff3cfc44, + 0xfbdb03e9, 0x03130457, 0xff5c04c9, 0x03b8000d, + 0x0000fa58, 0x00d2fbe9, 0xfe0bf9d0, 0xfdf90125, + 0x01bf0425, 0xfa2b0328, 0x02f0ffa9, 0x0157fa25, + 0xfe000200, 0x04380374, 0x05fdff04, 0xfe52030c, + 0xfb390037, 0x04c5ff69, 0xfd2304f8, 0xfc1bfd31, + 0xfd2cfd2c, 0xfd31fc1b, 0x04f8fd23, 0xff6904c5, + 0x0037fb39, 0x030cfe52, 0xff0405fd, 0x03740438, + 0x0200fe00, 0xfa250157, 0xffa902f0, 0x0328fa2b, + 0x042501bf, 0x0125fdf9, 0xf9d0fe0b, 0xfbe900d2, + 0xfa580000, 0x000d03b8, 0x04c9ff5c, 0x04570313, + 0x03e9fbdb, 0xfc44ff3c, 0xfe3c032b, 0x0022f927, + 0x0200fe00, 0x056d0277, 0xfcd70228, 0xfc3aff21, + 0x04c70571, 0xfe680157, 0xfc09fddc, 0xfce00500, + 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e, + 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c, + 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926, + 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942, + 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382, + 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4, + 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da, + 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be, + 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e, + 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c, + 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926, + 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942, + 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382, + 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4, + 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da, + 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be, +}; + +static const u32 b43_ntab_noisevar0_r3[] = { + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 
0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, +}; + +static const u32 b43_ntab_noisevar1_r3[] = { + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 
0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, + 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, +}; + +static const u16 b43_ntab_mcs_r3[] = { + 0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019, + 0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090, + 0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108, + 0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c, + 0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199, + 0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8, + 0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128, + 0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a, + 0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8, + 0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8, + 0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa, + 0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca, + 0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001, + 0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014, + 0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081, + 0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094, + 0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007, + 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, + 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, + 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, + 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, + 0x0007, 0x0007, +}; + +static const u32 b43_ntab_tdi20a0_r3[] = { + 0x00091226, 0x000a1429, 0x000b56ad, 0x000c58b0, + 0x000d5ab3, 0x000e9cb6, 0x000f9eba, 0x0000c13d, + 0x00020301, 0x00030504, 0x00040708, 0x0005090b, + 0x00064b8e, 0x00095291, 0x000a5494, 0x000b9718, + 0x000c9927, 0x000d9b2a, 0x000edd2e, 0x000fdf31, + 0x000101b4, 0x000243b7, 0x000345bb, 0x000447be, + 0x00058982, 0x00068c05, 0x00099309, 0x000a950c, + 0x000bd78f, 0x000cd992, 0x000ddb96, 0x000f1d99, + 0x00005fa8, 0x0001422c, 0x0002842f, 0x00038632, + 0x00048835, 0x0005ca38, 0x0006ccbc, 0x0009d3bf, + 0x000b1603, 0x000c1806, 0x000d1a0a, 0x000e1c0d, + 0x000f5e10, 0x00008093, 0x00018297, 0x0002c49a, + 0x0003c680, 0x0004c880, 0x00060b00, 0x00070d00, + 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_tdi20a1_r3[] = { + 0x00014b26, 0x00028d29, 0x000393ad, 0x00049630, + 0x0005d833, 0x0006da36, 0x00099c3a, 0x000a9e3d, + 0x000bc081, 0x000cc284, 0x000dc488, 0x000f068b, + 0x0000488e, 0x00018b91, 0x0002d214, 0x0003d418, + 0x0004d6a7, 0x000618aa, 0x00071aae, 0x0009dcb1, + 0x000b1eb4, 0x000c0137, 0x000d033b, 0x000e053e, + 0x000f4702, 0x00008905, 0x00020c09, 0x0003128c, + 0x0004148f, 0x00051712, 0x00065916, 0x00091b19, + 0x000a1d28, 0x000b5f2c, 0x000c41af, 0x000d43b2, + 0x000e85b5, 0x000f87b8, 0x0000c9bc, 0x00024cbf, + 0x00035303, 0x00045506, 0x0005978a, 0x0006998d, + 0x00095b90, 0x000a5d93, 0x000b9f97, 0x000c821a, + 0x000d8400, 0x000ec600, 0x000fc800, 
0x00010a00, + 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_tdi40a0_r3[] = { + 0x0011a346, 0x00136ccf, 0x0014f5d9, 0x001641e2, + 0x0017cb6b, 0x00195475, 0x001b2383, 0x001cad0c, + 0x001e7616, 0x0000821f, 0x00020ba8, 0x0003d4b2, + 0x00056447, 0x00072dd0, 0x0008b6da, 0x000a02e3, + 0x000b8c6c, 0x000d15f6, 0x0011e484, 0x0013ae0d, + 0x00153717, 0x00168320, 0x00180ca9, 0x00199633, + 0x001b6548, 0x001ceed1, 0x001eb7db, 0x0000c3e4, + 0x00024d6d, 0x000416f7, 0x0005a585, 0x00076f0f, + 0x0008f818, 0x000a4421, 0x000bcdab, 0x000d9734, + 0x00122649, 0x0013efd2, 0x001578dc, 0x0016c4e5, + 0x00184e6e, 0x001a17f8, 0x001ba686, 0x001d3010, + 0x001ef999, 0x00010522, 0x00028eac, 0x00045835, + 0x0005e74a, 0x0007b0d3, 0x00093a5d, 0x000a85e6, + 0x000c0f6f, 0x000dd8f9, 0x00126787, 0x00143111, + 0x0015ba9a, 0x00170623, 0x00188fad, 0x001a5936, + 0x001be84b, 0x001db1d4, 0x001f3b5e, 0x000146e7, + 0x00031070, 0x000499fa, 0x00062888, 0x0007f212, + 0x00097b9b, 0x000ac7a4, 0x000c50ae, 0x000e1a37, + 0x0012a94c, 0x001472d5, 0x0015fc5f, 0x00174868, + 0x0018d171, 0x001a9afb, 0x001c2989, 0x001df313, + 0x001f7c9c, 0x000188a5, 0x000351af, 0x0004db38, + 0x0006aa4d, 0x000833d7, 0x0009bd60, 0x000b0969, + 0x000c9273, 0x000e5bfc, 0x00132a8a, 0x0014b414, + 0x00163d9d, 0x001789a6, 0x001912b0, 0x001adc39, + 0x001c6bce, 0x001e34d8, 0x001fbe61, 0x0001ca6a, + 0x00039374, 0x00051cfd, 0x0006ec0b, 0x00087515, + 0x0009fe9e, 0x000b4aa7, 0x000cd3b1, 0x000e9d3a, + 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_tdi40a1_r3[] = { + 0x001edb36, 0x000129ca, 0x0002b353, 0x00047cdd, + 0x0005c8e6, 0x000791ef, 0x00091bf9, 0x000aaa07, + 0x000c3391, 0x000dfd1a, 0x00120923, 0x0013d22d, + 0x00155c37, 0x0016eacb, 0x00187454, 0x001a3dde, + 0x001b89e7, 0x001d12f0, 0x001f1cfa, 0x00016b88, + 0x00033492, 0x0004be1b, 0x00060a24, 0x0007d32e, + 0x00095d38, 0x000aec4c, 0x000c7555, 0x000e3edf, + 0x00124ae8, 0x001413f1, 0x0015a37b, 0x00172c89, + 0x0018b593, 0x001a419c, 0x001bcb25, 0x001d942f, + 0x001f63b9, 0x0001ad4d, 0x00037657, 0x0004c260, + 0x00068be9, 0x000814f3, 0x0009a47c, 0x000b2d8a, + 0x000cb694, 0x000e429d, 0x00128c26, 0x001455b0, + 0x0015e4ba, 0x00176e4e, 0x0018f758, 0x001a8361, + 0x001c0cea, 0x001dd674, 0x001fa57d, 0x0001ee8b, + 0x0003b795, 0x0005039e, 0x0006cd27, 0x000856b1, + 0x0009e5c6, 0x000b6f4f, 0x000cf859, 0x000e8462, + 0x00130deb, 0x00149775, 0x00162603, 0x0017af8c, + 0x00193896, 0x001ac49f, 0x001c4e28, 0x001e17b2, + 0x0000a6c7, 0x00023050, 0x0003f9da, 0x00054563, + 0x00070eec, 0x00089876, 0x000a2704, 0x000bb08d, + 0x000d3a17, 0x001185a0, 0x00134f29, 0x0014d8b3, + 0x001667c8, 0x0017f151, 0x00197adb, 0x001b0664, + 0x001c8fed, 0x001e5977, 0x0000e805, 0x0002718f, + 0x00043b18, 0x000586a1, 0x0007502b, 0x0008d9b4, + 0x000a68c9, 0x000bf252, 0x000dbbdc, 0x0011c7e5, + 0x001390ee, 0x00151a78, 0x0016a906, 0x00183290, + 0x0019bc19, 0x001b4822, 0x001cd12c, 0x001e9ab5, + 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_pilotlt_r3[] = { + 0x76540213, 0x62407351, 0x76543210, 0x76540213, + 0x76540213, 0x76430521, +}; + +static const u32 b43_ntab_channelest_r3[] = { + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 
0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x44444444, 0x44444444, 0x44444444, 0x44444444, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, + 0x10101010, 0x10101010, 0x10101010, 0x10101010, +}; + +static const u8 b43_ntab_framelookup_r3[] = { + 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16, + 0x0a, 0x0c, 0x1c, 0x1c, 0x0b, 0x0d, 0x1e, 0x1e, + 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1a, 0x1a, + 0x0e, 0x10, 0x20, 0x28, 0x0f, 0x11, 0x22, 0x2a, +}; + +static const u8 b43_ntab_estimatepowerlt0_r3[] = { + 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51, + 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c, + 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46, + 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f, + 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36, + 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b, + 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a, + 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd, +}; + +static const u8 b43_ntab_estimatepowerlt1_r3[] = { + 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51, + 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c, + 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46, + 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f, + 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36, + 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b, + 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a, + 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd, +}; + +static const u8 b43_ntab_adjustpower0_r3[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u8 b43_ntab_adjustpower1_r3[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static const u32 b43_ntab_gainctl0_r3[] = { + 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e, + 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037, + 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031, + 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040, + 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039, + 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033, + 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e, + 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037, + 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031, + 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c, + 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e, + 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037, + 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031, + 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c, + 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042, + 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b, + 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034, + 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f, + 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e, + 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037, + 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031, + 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c, + 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027, + 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023, + 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e, + 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037, + 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031, + 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c, + 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027, + 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023, + 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f, + 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c, +}; + +static const u32 b43_ntab_gainctl1_r3[] = { + 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e, + 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037, + 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031, + 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040, + 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039, + 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033, + 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e, + 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037, + 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031, + 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c, + 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e, + 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037, + 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031, + 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c, + 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042, + 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b, + 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034, + 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f, + 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e, + 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037, + 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031, + 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c, + 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027, + 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023, + 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e, + 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037, + 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031, + 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c, + 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027, + 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023, + 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f, + 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c, +}; + +static const u32 b43_ntab_iqlt0_r3[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 
0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static const u32 b43_ntab_iqlt1_r3[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, +}; + +static const u16 b43_ntab_loftlt0_r3[] = { + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, +}; + +static const u16 b43_ntab_loftlt1_r3[] = { + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, +}; + +/* TX gain tables */ const u32 b43_ntab_tx_gain_rev0_1_2[] = { 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44, @@ -1635,6 +2709,79 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = { { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ }; +struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = { + { /* 2GHz */ + { /* PHY rev 3 */ + { 7, 11, 16, 23 }, + { -5, 6, 10, 14 }, + { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, + 0x627E, + { 0x613F, 0x613F, 0x613F, 0x613F }, + 0x107E, 0x0066, 0x0074, + 0x18, 0x18, 0x18, + 0x020D, 0x5, + }, + { /* PHY rev 4 */ + { 8, 12, 17, 25 }, + { -5, 6, 10, 14 }, + { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, + 0x527E, + { 0x513F, 0x513F, 0x513F, 0x513F }, + 0x007E, 0x0066, 0x0074, + 0x18, 0x18, 0x18, + 0x01A1, 0x5, + }, + { /* PHY rev 5+ */ + { 9, 13, 18, 26 }, + { -3, 7, 11, 16 }, + { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, + 0x427E, /* invalid for external LNA! */ + { 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! 
*/ + 0x1076, 0x0066, 0x106A, + 0xC, 0xC, 0xC, + 0x01D0, 0x5, + }, + }, + { /* 5GHz */ + { /* PHY rev 3 */ + { 7, 11, 17, 23 }, + { -6, 2, 6, 10 }, + { 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, + 0x52DE, + { 0x516F, 0x516F, 0x516F, 0x516F }, + 0x00DE, 0x00CA, 0x00CC, + 0x1E, 0x1E, 0x1E, + 0x01A1, 25, + }, + { /* PHY rev 4 */ + { 8, 12, 18, 23 }, + { -5, 2, 6, 10 }, + { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, + { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, + 0x629E, + { 0x614F, 0x614F, 0x614F, 0x614F }, + 0x029E, 0x1084, 0x0086, + 0x24, 0x24, 0x24, + 0x0107, 25, + }, + { /* PHY rev 5+ */ + { 6, 10, 16, 21 }, + { -7, 0, 4, 8 }, + { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, + { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, + 0x729E, + { 0x714F, 0x714F, 0x714F, 0x714F }, + 0x029E, 0x2084, 0x2086, + 0x24, 0x24, 0x24, + 0x00A9, 25, + }, + }, +}; + static inline void assert_ntab_array_sizes(void) { #undef check @@ -1813,7 +2960,6 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset, #define ntab_upload(dev, offset, data) do { \ b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \ } while (0) - void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) { /* Static tables */ @@ -1847,11 +2993,70 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1); } +#define ntab_upload_r3(dev, offset, data) do { \ + b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \ + } while (0) void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev) { /* Static tables */ - /* TODO */ + ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3); + ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3); + ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3); + ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3); + ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3); + ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3); + ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3); + ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3); + ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3); + ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3); + ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3); + ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3); + ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3); + ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3); + ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3); + ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3, + b43_ntab_estimatepowerlt0_r3); + ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3, + b43_ntab_estimatepowerlt1_r3); + ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3); + ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3); + ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3); + ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3); + ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3); + ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3); + ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3); + ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3); /* Volatile tables */ /* TODO */ } + +struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( + struct b43_wldev *dev, bool ghz5, bool ext_lna) +{ + struct nphy_gain_ctl_workaround_entry *e; + u8 phy_idx; + + B43_WARN_ON(dev->phy.rev < 3); + if 
(dev->phy.rev >= 5) + phy_idx = 2; + else if (dev->phy.rev == 4) + phy_idx = 1; + else + phy_idx = 0; + + e = &nphy_gain_ctl_workaround[ghz5][phy_idx]; + + /* Only one entry differs for external LNA, so instead making whole + * table 2 times bigger, hack is here + */ + if (!ghz5 && dev->phy.rev >= 5 && ext_lna) { + e->rfseq_init[0] &= 0x0FFF; + e->rfseq_init[1] &= 0x0FFF; + e->rfseq_init[2] &= 0x0FFF; + e->rfseq_init[3] &= 0x0FFF; + e->init_gain &= 0x0FFF; + } + + return e; +} diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h index 4ec593ba3eef..18569367ce43 100644 --- a/drivers/net/wireless/b43/tables_nphy.h +++ b/drivers/net/wireless/b43/tables_nphy.h @@ -35,6 +35,31 @@ struct nphy_rf_control_override_rev3 { u8 val_addr1; }; +struct nphy_gain_ctl_workaround_entry { + s8 lna1_gain[4]; + s8 lna2_gain[4]; + u8 gain_db[10]; + u8 gain_bits[10]; + + u16 init_gain; + u16 rfseq_init[4]; + + u16 cliphi_gain; + u16 clipmd_gain; + u16 cliplo_gain; + + u16 crsmin; + u16 crsminl; + u16 crsminu; + + u16 nbclip; + u16 wlclip; +}; + +/* Get entry with workaround values for gain ctl. Does not return NULL. */ +struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( + struct b43_wldev *dev, bool ghz5, bool ext_lna); + /* Get the NPHY Channel Switch Table entry for a channel. * Returns NULL on failure to find an entry. */ const struct b43_nphy_channeltab_entry_rev2 * @@ -109,6 +134,33 @@ b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq); #define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */ #define B43_NTAB_C1_LOFEEDTH_SIZE 128 +/* Static N-PHY tables, PHY revision >= 3 */ +#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */ +#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */ +#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */ +#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */ +#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */ +#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */ +#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */ +#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */ +#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */ +#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */ +#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */ +#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */ +#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */ +#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */ +#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */ +#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */ +#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */ +#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */ +#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */ +#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */ +#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */ +#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */ +#define B43_NTAB_C1_IQLT_R3 B43_NTAB32(27, 320) /* I/Q lookup 1 */ +#define B43_NTAB_C0_LOFEEDTH_R3 B43_NTAB16(26, 448) /* Local Oscillator Feed Through lookup 0 */ +#define B43_NTAB_C1_LOFEEDTH_R3 B43_NTAB16(27, 448) /* Local Oscillator Feed Through lookup 1 */ + #define 
B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18 #define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18 #define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18 diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index e6b0528f3b52..e5be381c17bc 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -32,6 +32,36 @@ #include "dma.h" #include "pio.h" +static const struct b43_tx_legacy_rate_phy_ctl_entry b43_tx_legacy_rate_phy_ctl[] = { + { B43_CCK_RATE_1MB, 0x0, 0x0 }, + { B43_CCK_RATE_2MB, 0x0, 0x1 }, + { B43_CCK_RATE_5MB, 0x0, 0x2 }, + { B43_CCK_RATE_11MB, 0x0, 0x3 }, + { B43_OFDM_RATE_6MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_BPSK }, + { B43_OFDM_RATE_9MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_BPSK }, + { B43_OFDM_RATE_12MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QPSK }, + { B43_OFDM_RATE_18MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QPSK }, + { B43_OFDM_RATE_24MB, B43_TXH_PHY1_CRATE_1_2, B43_TXH_PHY1_MODUL_QAM16 }, + { B43_OFDM_RATE_36MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM16 }, + { B43_OFDM_RATE_48MB, B43_TXH_PHY1_CRATE_2_3, B43_TXH_PHY1_MODUL_QAM64 }, + { B43_OFDM_RATE_54MB, B43_TXH_PHY1_CRATE_3_4, B43_TXH_PHY1_MODUL_QAM64 }, +}; + +static const struct b43_tx_legacy_rate_phy_ctl_entry * +b43_tx_legacy_rate_phy_ctl_ent(u8 bitrate) +{ + const struct b43_tx_legacy_rate_phy_ctl_entry *e; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(b43_tx_legacy_rate_phy_ctl); i++) { + e = &(b43_tx_legacy_rate_phy_ctl[i]); + if (e->bitrate == bitrate) + return e; + } + + B43_WARN_ON(1); + return NULL; +} /* Extract the bitrate index out of a CCK PLCP header. */ static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp) @@ -145,6 +175,34 @@ void b43_generate_plcp_hdr(struct b43_plcp_hdr4 *plcp, } } +static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate) +{ + const struct b43_phy *phy = &dev->phy; + const struct b43_tx_legacy_rate_phy_ctl_entry *e; + u16 control = 0; + u16 bw; + + if (phy->type == B43_PHYTYPE_LP) + bw = B43_TXH_PHY1_BW_20; + else /* FIXME */ + bw = B43_TXH_PHY1_BW_20; + + if (0) { /* FIXME: MIMO */ + } else if (b43_is_cck_rate(bitrate) && phy->type != B43_PHYTYPE_LP) { + control = bw; + } else { + control = bw; + e = b43_tx_legacy_rate_phy_ctl_ent(bitrate); + if (e) { + control |= e->coding_rate; + control |= e->modulation; + } + control |= B43_TXH_PHY1_MODE_SISO; + } + + return control; +} + static u8 b43_calc_fallback_rate(u8 bitrate) { switch (bitrate) { @@ -437,6 +495,14 @@ int b43_generate_txhdr(struct b43_wldev *dev, extra_ft |= B43_TXH_EFT_RTSFB_OFDM; else extra_ft |= B43_TXH_EFT_RTSFB_CCK; + + if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS && + phy->type == B43_PHYTYPE_N) { + txhdr->phy_ctl1_rts = cpu_to_le16( + b43_generate_tx_phy_ctl1(dev, rts_rate)); + txhdr->phy_ctl1_rts_fb = cpu_to_le16( + b43_generate_tx_phy_ctl1(dev, rts_rate_fb)); + } } /* Magic cookie */ @@ -445,6 +511,13 @@ int b43_generate_txhdr(struct b43_wldev *dev, else txhdr->new_format.cookie = cpu_to_le16(cookie); + if (phy->type == B43_PHYTYPE_N) { + txhdr->phy_ctl1 = + cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate)); + txhdr->phy_ctl1_fb = + cpu_to_le16(b43_generate_tx_phy_ctl1(dev, rate_fb)); + } + /* Apply the bitfields */ txhdr->mac_ctl = cpu_to_le32(mac_ctl); txhdr->phy_ctl = cpu_to_le16(phy_ctl); @@ -652,7 +725,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) status.mactime += mactime; if (low_mactime_now <= mactime) status.mactime -= 0x10000; - status.flag |= RX_FLAG_TSFT; 
+ status.flag |= RX_FLAG_MACTIME_MPDU; } chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h index d4cf9b390af3..42debb5cd6fa 100644 --- a/drivers/net/wireless/b43/xmit.h +++ b/drivers/net/wireless/b43/xmit.h @@ -73,6 +73,12 @@ struct b43_txhdr { } __packed; } __packed; +struct b43_tx_legacy_rate_phy_ctl_entry { + u8 bitrate; + u16 coding_rate; + u16 modulation; +}; + /* MAC TX control */ #define B43_TXH_MAC_USEFBR 0x10000000 /* Use fallback rate for this AMPDU */ #define B43_TXH_MAC_KEYIDX 0x0FF00000 /* Security key index */ diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index 1f11e1670bf0..c7fd73e3ad76 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c @@ -2442,8 +2442,8 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl) return err; } -static int b43legacy_op_tx(struct ieee80211_hw *hw, - struct sk_buff *skb) +static void b43legacy_op_tx(struct ieee80211_hw *hw, + struct sk_buff *skb) { struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); struct b43legacy_wldev *dev = wl->current_dev; @@ -2466,7 +2466,6 @@ out: /* Drop the packet. */ dev_kfree_skb_any(skb); } - return NETDEV_TX_OK; } static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue, diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c index 7d177d97f1f7..3a95541708a6 100644 --- a/drivers/net/wireless/b43legacy/xmit.c +++ b/drivers/net/wireless/b43legacy/xmit.c @@ -572,7 +572,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev, status.mactime += mactime; if (low_mactime_now <= mactime) status.mactime -= 0x10000; - status.flag |= RX_FLAG_TSFT; + status.flag |= RX_FLAG_MACTIME_MPDU; } chanid = (chanstat & B43legacy_RX_CHAN_ID) >> diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 471a52a2f8d4..4b97f918daff 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -1396,7 +1396,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv) } /* - * Send the CARD_DISABLE_PHY_OFF comamnd to the card to disable it + * Send the CARD_DISABLE_PHY_OFF command to the card to disable it * * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent. * diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h index 0441445b8bfa..91795b5a93c5 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/ipw2x00/ipw2200.h @@ -961,7 +961,7 @@ struct ipw_country_channel_info { struct ipw_country_info { u8 id; u8 length; - u8 country_str[3]; + u8 country_str[IEEE80211_COUNTRY_STRING_LEN]; struct ipw_country_channel_info groups[7]; } __packed; diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig new file mode 100644 index 000000000000..2a45dd44cc12 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/Kconfig @@ -0,0 +1,116 @@ +config IWLWIFI_LEGACY + tristate "Intel Wireless Wifi legacy devices" + depends on PCI && MAC80211 + select FW_LOADER + select NEW_LEDS + select LEDS_CLASS + select LEDS_TRIGGERS + select MAC80211_LEDS + +menu "Debugging Options" + depends on IWLWIFI_LEGACY + +config IWLWIFI_LEGACY_DEBUG + bool "Enable full debugging output in 4965 and 3945 drivers" + depends on IWLWIFI_LEGACY + ---help--- + This option will enable debug tracing output for the iwlwifilegacy + drivers. 
+ + This will result in the kernel module being ~100k larger. You can + control which debug output is sent to the kernel log by setting the + value in + + /sys/class/net/wlan0/device/debug_level + + This entry will only exist if this option is enabled. + + To set a value, simply echo an 8-byte hex value to the same file: + + % echo 0x43fff > /sys/class/net/wlan0/device/debug_level + + You can find the list of debug mask values in: + drivers/net/wireless/iwlwifilegacy/iwl-debug.h + + If this is your first time using this driver, you should say Y here + as the debug information can assist others in helping you resolve + any problems you may encounter. + +config IWLWIFI_LEGACY_DEBUGFS + bool "4965 and 3945 debugfs support" + depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS + ---help--- + Enable creation of debugfs files for the iwlwifilegacy drivers. This + is a low-impact option that allows getting insight into the + driver's state at runtime. + +config IWLWIFI_LEGACY_DEVICE_TRACING + bool "iwlwifilegacy legacy device access tracing" + depends on IWLWIFI_LEGACY + depends on EVENT_TRACING + help + Say Y here to trace all commands, including TX frames and IO + accesses, sent to the device. If you say yes, iwlwifilegacy will + register with the ftrace framework for event tracing and dump + all this information to the ringbuffer, you may need to + increase the ringbuffer size. See the ftrace documentation + for more information. + + When tracing is not enabled, this option still has some + (though rather small) overhead. + + If unsure, say Y so we can help you better when problems + occur. +endmenu + +config IWL4965 + tristate "Intel Wireless WiFi 4965AGN (iwl4965)" + depends on IWLWIFI_LEGACY + ---help--- + This option enables support for + + Select to build the driver supporting the: + + Intel Wireless WiFi Link 4965AGN + + This driver uses the kernel's mac80211 subsystem. + + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + <http://intellinuxwireless.org/>. + + The microcode is typically installed in /lib/firmware. You can + look in the hotplug script /etc/hotplug/firmware.agent to + determine which directory FIRMWARE_DIR is set to when the script + runs. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read <file:Documentation/kbuild/modules.txt>. The + module will be called iwl4965. + +config IWL3945 + tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" + depends on IWLWIFI_LEGACY + ---help--- + Select to build the driver supporting the: + + Intel PRO/Wireless 3945ABG/BG Network Connection + + This driver uses the kernel's mac80211 subsystem. + + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + <http://intellinuxwireless.org/>. + + The microcode is typically installed in /lib/firmware. You can + look in the hotplug script /etc/hotplug/firmware.agent to + determine which directory FIRMWARE_DIR is set to when the script + runs. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read <file:Documentation/kbuild/modules.txt>. The + module will be called iwl3945. 
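For reference, a minimal .config fragment that selects the new iwlegacy drivers as modules, using only the symbols defined in the Kconfig file above, could look like the following sketch; it assumes PCI, MAC80211 and, for the debugfs option, MAC80211_DEBUGFS are already enabled, and the debugging options are shown purely as an illustration:

	CONFIG_IWLWIFI_LEGACY=m
	# optional debugging support for the legacy drivers
	CONFIG_IWLWIFI_LEGACY_DEBUG=y
	CONFIG_IWLWIFI_LEGACY_DEBUGFS=y
	# CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING is not set
	CONFIG_IWL4965=m
	CONFIG_IWL3945=m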
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile new file mode 100644 index 000000000000..d56aeb38c211 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/Makefile @@ -0,0 +1,25 @@ +obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o +iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o +iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o +iwl-legacy-objs += iwl-scan.o iwl-led.o +iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o +iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o + +iwl-legacy-objs += $(iwl-legacy-m) + +CFLAGS_iwl-devtrace.o := -I$(src) + +# 4965 +obj-$(CONFIG_IWL4965) += iwl4965.o +iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o +iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o +iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o +iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o +iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o + +# 3945 +obj-$(CONFIG_IWL3945) += iwl3945.o +iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o +iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c index ef0835b01b6b..cfabb38793ab 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c @@ -2,7 +2,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -60,12 +60,13 @@ ssize_t iwl3945_ucode_rx_stats_read(struct file *file, int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 + sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400; ssize_t ret; - struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, + *max_ofdm; struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; struct iwl39_statistics_rx_non_phy *general, *accum_general; struct iwl39_statistics_rx_non_phy *delta_general, *max_general; - if (!iwl_is_alive(priv)) + if (!iwl_legacy_is_alive(priv)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); @@ -335,7 +336,7 @@ ssize_t iwl3945_ucode_tx_stats_read(struct file *file, ssize_t ret; struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; - if (!iwl_is_alive(priv)) + if (!iwl_legacy_is_alive(priv)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); @@ -434,7 +435,7 @@ ssize_t iwl3945_ucode_general_stats_read(struct file *file, struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div; - if (!iwl_is_alive(priv)) + if (!iwl_legacy_is_alive(priv)) return -EAGAIN; buf = kzalloc(bufsz, GFP_KERNEL); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h index 70809c53c215..8fef4b32b447 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h +++ b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h @@ -2,7 +2,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ #include "iwl-core.h" #include "iwl-debug.h" -#ifdef CONFIG_IWLWIFI_DEBUGFS +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos); ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf, diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h index 2c9ed2b502a3..836c9919f82e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h +++ b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -185,4 +185,3 @@ struct iwl3945_tfd { #endif /* __iwl_3945_fh_h__ */ - diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h index 65b5834da28c..779d3cb86e2c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h +++ b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -164,12 +164,11 @@ struct iwl3945_eeprom { /* * Per-channel regulatory data. * - * Each channel that *might* be supported by 3945 or 4965 has a fixed location + * Each channel that *might* be supported by 3945 has a fixed location * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory * txpower (MSB). * - * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) - * channels (only for 4965, not supported by 3945) appear later in the EEPROM. + * Entries immediately below are for 20 MHz channel width. * * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */ diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c index abe2b739c4dc..abd923558d48 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -56,36 +56,9 @@ static int iwl3945_send_led_cmd(struct iwl_priv *priv, .callback = NULL, }; - return iwl_send_cmd(priv, &cmd); -} - -/* Set led on command */ -static int iwl3945_led_on(struct iwl_priv *priv) -{ - struct iwl_led_cmd led_cmd = { - .id = IWL_LED_LINK, - .on = IWL_LED_SOLID, - .off = 0, - .interval = IWL_DEF_LED_INTRVL - }; - return iwl3945_send_led_cmd(priv, &led_cmd); -} - -/* Set led off command */ -static int iwl3945_led_off(struct iwl_priv *priv) -{ - struct iwl_led_cmd led_cmd = { - .id = IWL_LED_LINK, - .on = 0, - .off = 0, - .interval = IWL_DEF_LED_INTRVL - }; - IWL_DEBUG_LED(priv, "led off\n"); - return iwl3945_send_led_cmd(priv, &led_cmd); + return iwl_legacy_send_cmd(priv, &cmd); } const struct iwl_led_ops iwl3945_led_ops = { .cmd = iwl3945_send_led_cmd, - .on = iwl3945_led_on, - .off = iwl3945_led_off, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h index ce990adc51e7..96716276eb0d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h +++ b/drivers/net/wireless/iwlegacy/iwl-3945-led.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c index 1f3e7e34fbc7..977bd2477c6a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -89,7 +89,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = { }; #define IWL_RATE_MAX_WINDOW 62 -#define IWL_RATE_FLUSH (3*HZ) +#define IWL_RATE_FLUSH (3*HZ) #define IWL_RATE_WIN_FLUSH (HZ/2) #define IWL39_RATE_HIGH_TH 11520 #define IWL_SUCCESS_UP_TH 8960 @@ -394,18 +394,18 @@ out: IWL_DEBUG_INFO(priv, "leave\n"); } -static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) { return hw->priv; } /* rate scale requires free function to be implemented */ -static void rs_free(void *priv) +static void iwl3945_rs_free(void *priv) { return; } -static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) +static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) { struct iwl3945_rs_sta *rs_sta; struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; @@ -423,7 +423,7 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp) return rs_sta; } -static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, +static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, void *priv_sta) { struct iwl3945_rs_sta *rs_sta = priv_sta; @@ -438,12 +438,12 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, /** - * rs_tx_status - Update rate control values based on Tx results + * iwl3945_rs_tx_status - Update rate control values based on Tx results * * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by * the hardware for each rate. */ -static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, +static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta, struct sk_buff *skb) { @@ -612,7 +612,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta, } /** - * rs_get_rate - find the rate for the requested packet + * iwl3945_rs_get_rate - find the rate for the requested packet * * Returns the ieee80211_rate structure allocated by the driver. * @@ -627,7 +627,7 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta, * rate table and must reference the driver allocated rate table * */ -static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, +static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate_control *txrc) { struct ieee80211_supported_band *sband = txrc->sband; @@ -644,7 +644,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, u32 fail_count; s8 scale_action = 0; unsigned long flags; - u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0; + u16 rate_mask; s8 max_rate_idx = -1; struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -899,7 +899,8 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta) * the station is added. Since mac80211 calls this function before a * station is added we ignore it. 
*/ -static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, +static void iwl3945_rs_rate_init_stub(void *priv_r, + struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta) { } @@ -907,13 +908,13 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba static struct rate_control_ops rs_ops = { .module = NULL, .name = RS_NAME, - .tx_status = rs_tx_status, - .get_rate = rs_get_rate, - .rate_init = rs_rate_init_stub, - .alloc = rs_alloc, - .free = rs_free, - .alloc_sta = rs_alloc_sta, - .free_sta = rs_free_sta, + .tx_status = iwl3945_rs_tx_status, + .get_rate = iwl3945_rs_get_rate, + .rate_init = iwl3945_rs_rate_init_stub, + .alloc = iwl3945_rs_alloc, + .free = iwl3945_rs_free, + .alloc_sta = iwl3945_rs_alloc_sta, + .free_sta = iwl3945_rs_free_sta, #ifdef CONFIG_MAC80211_DEBUGFS .add_sta_debugfs = iwl3945_add_debugfs, .remove_sta_debugfs = iwl3945_remove_debugfs, @@ -991,5 +992,3 @@ void iwl3945_rate_control_unregister(void) { ieee80211_rate_control_unregister(&rs_ops); } - - diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c index 39b6f16c87fa..d096dc28204d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlegacy/iwl-3945.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -51,7 +51,6 @@ #include "iwl-led.h" #include "iwl-3945-led.h" #include "iwl-3945-debugfs.h" -#include "iwl-legacy.h" #define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ @@ -172,14 +171,14 @@ void iwl3945_disable_events(struct iwl_priv *priv) return; } - disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32))); - array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32))); + disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32))); + array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32))); if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) { IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n", disable_ptr); for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++) - iwl_write_targ_mem(priv, + iwl_legacy_write_targ_mem(priv, disable_ptr + (i * sizeof(u32)), evt_disable[i]); @@ -202,7 +201,7 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp) return -1; } -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG #define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x static const char *iwl3945_get_tx_fail_reason(u32 status) @@ -255,7 +254,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate) break; case IEEE80211_BAND_2GHZ: if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && - iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { + iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { if (rate == IWL_RATE_11M_INDEX) next_rate = IWL_RATE_5M_INDEX; } @@ -285,8 +284,9 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv, BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM); - for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index; - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { + for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd); + q->read_ptr != index; + q->read_ptr = 
iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { tx_info = &txq->txb[txq->q.read_ptr]; ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); @@ -294,10 +294,10 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv, priv->cfg->ops->lib->txq_free_tfd(priv, txq); } - if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && + if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) && (txq_id != IWL39_CMD_QUEUE_NUM) && priv->mac80211_registered) - iwl_wake_queue(priv, txq); + iwl_legacy_wake_queue(priv, txq); } /** @@ -317,7 +317,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv, int rate_idx; int fail; - if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { + if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) { IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " "is out of range [0-%d] %d %d\n", txq_id, index, txq->q.n_bd, txq->q.write_ptr, @@ -363,12 +363,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv, * RX handler implementations * *****************************************************************************/ -#ifdef CONFIG_IWLWIFI_DEBUGFS -/* - * based on the assumption of all statistics counter are in DWORD - * FIXME: This function is for debugging, do not deal with - * the case of counters roll-over. - */ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS static void iwl3945_accumulative_statistics(struct iwl_priv *priv, __le32 *stats) { @@ -410,10 +405,10 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv, IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", (int)sizeof(struct iwl3945_notif_statistics), le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); -#ifdef CONFIG_IWLWIFI_DEBUGFS +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw); #endif - iwl_recover_from_statistics(priv, pkt); + iwl_legacy_recover_from_statistics(priv, pkt); memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics)); } @@ -425,7 +420,7 @@ void iwl3945_reply_statistics(struct iwl_priv *priv, __le32 *flag = (__le32 *)&pkt->u.raw; if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) { -#ifdef CONFIG_IWLWIFI_DEBUGFS +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS memset(&priv->_3945.accum_statistics, 0, sizeof(struct iwl3945_notif_statistics)); memset(&priv->_3945.delta_statistics, 0, @@ -496,14 +491,14 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv, } if (!iwl3945_mod_params.sw_crypto) - iwl_set_decrypted_flag(priv, + iwl_legacy_set_decrypted_flag(priv, (struct ieee80211_hdr *)rxb_addr(rxb), le32_to_cpu(rx_end->status), stats); skb_add_rx_frag(skb, 0, rxb->page, (void *)rx_hdr->payload - (void *)pkt, len); - iwl_update_stats(priv, false, fc, len); + iwl_legacy_update_stats(priv, false, fc, len); memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); ieee80211_rx(priv->hw, skb); @@ -528,10 +523,11 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, rx_status.flag = 0; rx_status.mactime = le64_to_cpu(rx_end->timestamp); - rx_status.freq = - ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel)); rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel), + rx_status.band); rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate); if (rx_status.band == IEEE80211_BAND_5GHZ) @@ -575,7 +571,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, rx_status.signal, rx_status.signal, rx_status.rate_idx); - iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); + iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), + header); if (network_packet) { priv->_3945.last_beacon_time = @@ -695,8 +692,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, /* We need to figure out how to get the sta->supp_rates while * in this running context */ - rate_mask = IWL_RATES_MASK; - + rate_mask = IWL_RATES_MASK_3945; /* Set retry limit on DATA packets and Probe Responses*/ if (ieee80211_is_probe_resp(fc)) @@ -744,7 +740,7 @@ static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate) station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK; station->sta.rate_n_flags = cpu_to_le16(tx_rate); station->sta.mode = STA_CONTROL_MODIFY_MSK; - iwl_send_add_sta(priv, &station->sta, CMD_ASYNC); + iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC); spin_unlock_irqrestore(&priv->sta_lock, flags_spin); IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n", @@ -759,7 +755,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv) * to set power to V_AUX, do if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) { - iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VAUX, ~APMG_PS_CTRL_MSK_PWR_SRC); @@ -769,7 +765,7 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv) } */ - iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~APMG_PS_CTRL_MSK_PWR_SRC); @@ -779,10 +775,11 @@ static void iwl3945_set_pwr_vmain(struct iwl_priv *priv) static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) { - iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); - iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma); - iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0); - iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), + iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma); + iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), + rxq->rb_stts_dma); + iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0); + iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE | FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE | FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN | @@ -793,7 +790,7 @@ static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH); /* fake read to flush all prev I/O */ - iwl_read_direct32(priv, FH39_RSSR_CTRL); + iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL); return 0; } @@ -802,23 +799,23 @@ static int iwl3945_tx_reset(struct iwl_priv *priv) { /* bypass mode */ - iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2); + iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2); /* RA 0 is active */ - iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); + iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01); /* all 6 fifo are active */ - iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); + iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f); - iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); - iwl_write_prph(priv, 
ALM_SCD_SBYP_MODE_2_REG, 0x030002); - iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); - iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); + iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000); + iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002); + iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004); + iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); - iwl_write_direct32(priv, FH39_TSSR_CBB_BASE, + iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE, priv->_3945.shared_phys); - iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG, + iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG, FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON | FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B | @@ -844,7 +841,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) iwl3945_hw_txq_ctx_free(priv); /* allocate tx queue structure */ - rc = iwl_alloc_txq_mem(priv); + rc = iwl_legacy_alloc_txq_mem(priv); if (rc) return rc; @@ -857,8 +854,8 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num, - txq_id); + rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id], + slots_num, txq_id); if (rc) { IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); goto error; @@ -875,21 +872,23 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv) /* * Start up 3945's basic functionality after it has been reset - * (e.g. after platform boot, or shutdown via iwl_apm_stop()) + * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop()) * NOTE: This does not load uCode nor start the embedded processor */ static int iwl3945_apm_init(struct iwl_priv *priv) { - int ret = iwl_apm_init(priv); + int ret = iwl_legacy_apm_init(priv); /* Clear APMG (NIC's internal power management) interrupts */ - iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); - iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); + iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0); + iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF); /* Reset radio chip */ - iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); + iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); udelay(5); - iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ); + iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); return ret; } @@ -898,30 +897,28 @@ static void iwl3945_nic_config(struct iwl_priv *priv) { struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; unsigned long flags; - u8 rev_id = 0; + u8 rev_id = priv->pci_dev->revision; spin_lock_irqsave(&priv->lock, flags); /* Determine HW type */ - pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id); - IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); if (rev_id & PCI_CFG_REV_ID_BIT_RTP) IWL_DEBUG_INFO(priv, "RTP type\n"); else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_3945_MB); } else { IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n"); - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_3945_MM); } if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) { 
IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n"); - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC); } else IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n"); @@ -929,24 +926,24 @@ static void iwl3945_nic_config(struct iwl_priv *priv) if ((eeprom->board_revision & 0xF0) == 0xD0) { IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", eeprom->board_revision); - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); } else { IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n", eeprom->board_revision); - iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); } if (eeprom->almgor_m_version <= 1) { - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n", eeprom->almgor_m_version); } else { IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n", eeprom->almgor_m_version); - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); } spin_unlock_irqrestore(&priv->lock, flags); @@ -974,7 +971,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv) /* Allocate the RX queue, or reset if it is already allocated */ if (!rxq->bd) { - rc = iwl_rx_queue_alloc(priv); + rc = iwl_legacy_rx_queue_alloc(priv); if (rc) { IWL_ERR(priv, "Unable to initialize Rx queue\n"); return -ENOMEM; @@ -989,10 +986,10 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv) /* Look at using this instead: rxq->need_update = 1; - iwl_rx_queue_update_write_ptr(priv, rxq); + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); */ - iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); + iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7); rc = iwl3945_txq_ctx_reset(priv); if (rc) @@ -1017,12 +1014,12 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv) for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) if (txq_id == IWL39_CMD_QUEUE_NUM) - iwl_cmd_queue_free(priv); + iwl_legacy_cmd_queue_free(priv); else - iwl_tx_queue_free(priv, txq_id); + iwl_legacy_tx_queue_free(priv, txq_id); /* free tx queue structure */ - iwl_free_txq_mem(priv); + iwl_legacy_txq_mem(priv); } void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) @@ -1030,12 +1027,12 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) int txq_id; /* stop SCD */ - iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); - iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0); + iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0); + iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0); /* reset TFD queues */ for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { - iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); + iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0); iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS, FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id), 1000); @@ -1102,12 +1099,12 @@ static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv) #define IWL_TEMPERATURE_LIMIT_TIMER 6 /** - * is_temp_calib_needed - determines if new calibration is needed + * iwl3945_is_temp_calib_needed - determines if new calibration is needed * * records new temperature in tx_mgr->temperature. * replaces tx_mgr->last_temperature *only* if calib needed * (assumes caller will actually do the calibration!). 
*/ -static int is_temp_calib_needed(struct iwl_priv *priv) +static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv) { int temp_diff; @@ -1338,9 +1335,6 @@ static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_in * based on eeprom channel data) for this channel. */ power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]); - /* further limit to user's max power preference. - * FIXME: Other spectrum management power limitations do not - * seem to apply?? */ power = min(power, priv->tx_power_user_lmt); scan_power_info->requested_power = power; @@ -1394,7 +1388,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv) chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; - ch_info = iwl_get_channel_info(priv, priv->band, chan); + ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan); if (!ch_info) { IWL_ERR(priv, "Failed to get channel info for channel %d [%d]\n", @@ -1402,7 +1396,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv) return -EINVAL; } - if (!is_channel_valid(ch_info)) { + if (!iwl_legacy_is_channel_valid(ch_info)) { IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on " "non-Tx channel.\n"); return 0; @@ -1437,7 +1431,7 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv) txpower.power[i].rate); } - return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, + return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(struct iwl3945_txpowertable_cmd), &txpower); @@ -1571,7 +1565,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv) /* set up new Tx power info for each and every channel, 2.4 and 5.x */ for (i = 0; i < priv->channel_count; i++) { ch_info = &priv->channel_info[i]; - a_band = is_channel_a_band(ch_info); + a_band = iwl_legacy_is_channel_a_band(ch_info); /* Get this chnlgrp's factory calibration temperature */ ref_temp = (s16)eeprom->groups[ch_info->group_index]. @@ -1583,7 +1577,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv) ref_temp); /* set tx power value for all rates, OFDM and CCK */ - for (rate_index = 0; rate_index < IWL_RATE_COUNT; + for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945; rate_index++) { int power_idx = ch_info->power_info[rate_index].base_power_index; @@ -1637,7 +1631,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) for (i = 0; i < priv->channel_count; i++) { ch_info = &priv->channel_info[i]; - a_band = is_channel_a_band(ch_info); + a_band = iwl_legacy_is_channel_a_band(ch_info); /* find minimum power of all user and regulatory constraints * (does not consider h/w clipping limitations) */ @@ -1653,7 +1647,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power) /* update txpower settings for all channels, * send to NIC if associated. 
*/ - is_temp_calib_needed(priv); + iwl3945_is_temp_calib_needed(priv); iwl3945_hw_reg_comp_txpower_temp(priv); return 0; @@ -1671,8 +1665,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv, .flags = CMD_WANT_SKB, .data = &rxon_assoc, }; - const struct iwl_rxon_cmd *rxon1 = &ctx->staging; - const struct iwl_rxon_cmd *rxon2 = &ctx->active; + const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging; + const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active; if ((rxon1->flags == rxon2->flags) && (rxon1->filter_flags == rxon2->filter_flags) && @@ -1688,7 +1682,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv, rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; rxon_assoc.reserved = 0; - rc = iwl_send_cmd_sync(priv, &cmd); + rc = iwl_legacy_send_cmd_sync(priv, &cmd); if (rc) return rc; @@ -1698,7 +1692,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv, rc = -EIO; } - iwl_free_pages(priv, cmd.reply_page); + iwl_legacy_free_pages(priv, cmd.reply_page); return rc; } @@ -1722,7 +1716,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return -EINVAL; - if (!iwl_is_alive(priv)) + if (!iwl_legacy_is_alive(priv)) return -1; /* always get timestamp with Rx frame */ @@ -1733,7 +1727,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); staging_rxon->flags |= iwl3945_get_antenna_flags(priv); - rc = iwl_check_rxon_cmd(priv, ctx); + rc = iwl_legacy_check_rxon_cmd(priv, ctx); if (rc) { IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n"); return -EINVAL; @@ -1742,8 +1736,9 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* If we don't need to send a full RXON, we can use * iwl3945_rxon_assoc_cmd which is used to reconfigure filter * and other flags for the current radio configuration. 
*/ - if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) { - rc = iwl_send_rxon_assoc(priv, + if (!iwl_legacy_full_rxon_required(priv, + &priv->contexts[IWL_RXON_CTX_BSS])) { + rc = iwl_legacy_send_rxon_assoc(priv, &priv->contexts[IWL_RXON_CTX_BSS]); if (rc) { IWL_ERR(priv, "Error setting RXON_ASSOC " @@ -1760,7 +1755,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) * an RXON_ASSOC and the new config wants the associated mask enabled, * we must clear the associated from the active configuration * before we apply the new config */ - if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) { + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) { IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; @@ -1770,7 +1765,7 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) */ active_rxon->reserved4 = 0; active_rxon->reserved5 = 0; - rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON, sizeof(struct iwl3945_rxon_cmd), &priv->contexts[IWL_RXON_CTX_BSS].active); @@ -1782,9 +1777,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) "configuration (%d).\n", rc); return rc; } - iwl_clear_ucode_stations(priv, + iwl_legacy_clear_ucode_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + iwl_legacy_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]); - iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]); } IWL_DEBUG_INFO(priv, "Sending RXON\n" @@ -1802,10 +1798,10 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) staging_rxon->reserved4 = 0; staging_rxon->reserved5 = 0; - iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto); + iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto); /* Apply the new configuration */ - rc = iwl_send_cmd_pdu(priv, REPLY_RXON, + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON, sizeof(struct iwl3945_rxon_cmd), staging_rxon); if (rc) { @@ -1816,14 +1812,15 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); if (!new_assoc) { - iwl_clear_ucode_stations(priv, + iwl_legacy_clear_ucode_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]); - iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]); + iwl_legacy_restore_stations(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); } /* If we issue a new RXON command which required a tune then we must * send a new TXPOWER command or we won't be able to Tx any frames */ - rc = priv->cfg->ops->lib->send_tx_power(priv); + rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); if (rc) { IWL_ERR(priv, "Error setting Tx power (%d).\n", rc); return rc; @@ -1853,7 +1850,7 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv) { /* This will kick in the "brute force" * iwl3945_hw_reg_comp_txpower_temp() below */ - if (!is_temp_calib_needed(priv)) + if (!iwl3945_is_temp_calib_needed(priv)) goto reschedule; /* Set up a new set of temp-adjusted TxPowers, send to NIC. @@ -1900,7 +1897,7 @@ static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv, u8 grp_channel; /* Find the group index for the channel ... don't use index 1(?) 
*/ - if (is_channel_a_band(ch_info)) { + if (iwl_legacy_is_channel_a_band(ch_info)) { for (group = 1; group < 5; group++) { grp_channel = ch_grp[group].group_channel; if (ch_info->channel <= grp_channel) { @@ -2080,8 +2077,8 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv) /* initialize Tx power info for each and every channel, 2.4 and 5.x */ for (i = 0, ch_info = priv->channel_info; i < priv->channel_count; i++, ch_info++) { - a_band = is_channel_a_band(ch_info); - if (!is_channel_valid(ch_info)) + a_band = iwl_legacy_is_channel_a_band(ch_info); + if (!iwl_legacy_is_channel_valid(ch_info)) continue; /* find this channel's channel group (*not* "band") index */ @@ -2184,7 +2181,7 @@ int iwl3945_hw_rxq_stop(struct iwl_priv *priv) { int rc; - iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); + iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0); rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS, FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); if (rc < 0) @@ -2201,10 +2198,10 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); - iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); - iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); + iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0); + iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0); - iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), + iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT | FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF | FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD | @@ -2233,7 +2230,8 @@ static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len) } -static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) +static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data) { struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data; addsta->mode = cmd->mode; @@ -2261,7 +2259,7 @@ static int iwl3945_add_bssid_station(struct iwl_priv *priv, if (sta_id_r) *sta_id_r = IWL_INVALID_STATION; - ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); + ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); if (ret) { IWL_ERR(priv, "Unable to add station %pM\n", addr); return ret; @@ -2296,7 +2294,7 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv, return 0; } - return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id, + return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id, vif->bss_conf.bssid); } @@ -2347,7 +2345,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv) * 1M CCK rates */ if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) && - iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { + iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { index = IWL_FIRST_CCK_RATE; for (i = IWL_RATE_6M_INDEX_TABLE; @@ -2368,14 +2366,14 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv) /* Update the rate scaling for control frame Tx */ rate_cmd.table_id = 0; - rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), &rate_cmd); if (rc) return rc; /* Update the rate scaling for data frame Tx */ rate_cmd.table_id = 1; - return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), + return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd), &rate_cmd); } @@ -2475,11 +2473,11 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv) IWL_DEBUG_INFO(priv, "Begin verify 
bsm\n"); /* verify BSM SRAM contents */ - val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); + val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG); for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len; reg += sizeof(u32), image++) { - val = iwl_read_prph(priv, reg); + val = iwl_legacy_read_prph(priv, reg); if (val != le32_to_cpu(*image)) { IWL_ERR(priv, "BSM uCode verification failed at " "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", @@ -2512,7 +2510,7 @@ static int iwl3945_verify_bsm(struct iwl_priv *priv) */ static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv) { - _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); + _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK); return 0; } @@ -2583,16 +2581,16 @@ static int iwl3945_load_bsm(struct iwl_priv *priv) inst_len = priv->ucode_init.len; data_len = priv->ucode_init_data.len; - iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); - iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); - iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); - iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); /* Fill BSM memory with bootstrap instructions */ for (reg_offset = BSM_SRAM_LOWER_BOUND; reg_offset < BSM_SRAM_LOWER_BOUND + len; reg_offset += sizeof(u32), image++) - _iwl_write_prph(priv, reg_offset, + _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image)); rc = iwl3945_verify_bsm(priv); @@ -2600,19 +2598,19 @@ static int iwl3945_load_bsm(struct iwl_priv *priv) return rc; /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ - iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); - iwl_write_prph(priv, BSM_WR_MEM_DST_REG, + iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG, IWL39_RTC_INST_LOWER_BOUND); - iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); /* Load bootstrap code into instruction SRAM now, * to prepare to load "initialize" uCode */ - iwl_write_prph(priv, BSM_WR_CTRL_REG, + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); /* Wait for load of bootstrap uCode to finish */ for (i = 0; i < 100; i++) { - done = iwl_read_prph(priv, BSM_WR_CTRL_REG); + done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); if (!(done & BSM_WR_CTRL_REG_BIT_START)) break; udelay(10); @@ -2626,7 +2624,7 @@ static int iwl3945_load_bsm(struct iwl_priv *priv) /* Enable future boot loads whenever power management unit triggers it * (e.g. 
when powering back up after power-save shutdown) */ - iwl_write_prph(priv, BSM_WR_CTRL_REG, + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); return 0; @@ -2635,7 +2633,6 @@ static int iwl3945_load_bsm(struct iwl_priv *priv) static struct iwl_hcmd_ops iwl3945_hcmd = { .rxon_assoc = iwl3945_send_rxon_assoc, .commit_rxon = iwl3945_commit_rxon, - .send_bt_config = iwl_send_bt_config, }; static struct iwl_lib_ops iwl3945_lib = { @@ -2661,13 +2658,9 @@ static struct iwl_lib_ops iwl3945_lib = { }, .acquire_semaphore = iwl3945_eeprom_acquire_semaphore, .release_semaphore = iwl3945_eeprom_release_semaphore, - .query_addr = iwlcore_eeprom_query_addr, }, .send_tx_power = iwl3945_send_tx_power, .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, - .isr_ops = { - .isr = iwl_isr_legacy, - }, .debugfs_ops = { .rx_stats_read = iwl3945_ucode_rx_stats_read, @@ -2685,7 +2678,6 @@ static const struct iwl_legacy_ops iwl3945_legacy_ops = { static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { .get_hcmd_size = iwl3945_get_hcmd_size, .build_addsta_hcmd = iwl3945_build_addsta_hcmd, - .tx_cmd_protection = iwl_legacy_tx_cmd_protection, .request_scan = iwl3945_request_scan, .post_scan = iwl3945_post_scan, }; @@ -2705,13 +2697,10 @@ static struct iwl_base_params iwl3945_base_params = { .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL, .set_l0s = false, .use_bsm = true, - .use_isr_legacy = true, .led_compensation = 64, - .broken_powersave = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, .wd_timeout = IWL_DEF_WD_TIMEOUT, .max_event_log_size = 512, - .tx_power_by_driver = true, }; static struct iwl_cfg iwl3945_bg_cfg = { diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h index 3eef1eb74a78..b118b59b71de 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.h +++ b/drivers/net/wireless/iwlegacy/iwl-3945.h @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -108,7 +108,7 @@ struct iwl3945_rs_sta { /* * The common struct MUST be first because it is shared between - * 3945 and agn! + * 3945 and 4965! 
*/ struct iwl3945_sta_priv { struct iwl_station_priv_common common; @@ -201,7 +201,7 @@ struct iwl3945_ibss_seq { /****************************************************************************** * - * Functions implemented in iwl-base.c which are forward declared here + * Functions implemented in iwl3945-base.c which are forward declared here * for use by iwl-*.c * *****************************************************************************/ @@ -209,7 +209,7 @@ extern int iwl3945_calc_db_from_ratio(int sig_ratio); extern void iwl3945_rx_replenish(void *data); extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, - struct ieee80211_hdr *hdr,int left); + struct ieee80211_hdr *hdr, int left); extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, char **buf, bool display); extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); @@ -217,7 +217,7 @@ extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); /****************************************************************************** * * Functions implemented in iwl-[34]*.c which are forward declared here - * for use by iwl-base.c + * for use by iwl3945-base.c * * NOTE: The implementation of these functions are hardware specific * which is why they are in the hardware specific files (vs. iwl-base.c) @@ -283,7 +283,7 @@ extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); extern struct ieee80211_ops iwl3945_hw_ops; /* - * Forward declare iwl-3945.c functions for iwl-base.c + * Forward declare iwl-3945.c functions for iwl3945-base.c */ extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv); extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c new file mode 100644 index 000000000000..81d6a25eb04f --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.c @@ -0,0 +1,967 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#include <linux/slab.h> +#include <net/mac80211.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-4965-calib.h" + +/***************************************************************************** + * INIT calibrations framework + *****************************************************************************/ + +struct statistics_general_data { + u32 beacon_silence_rssi_a; + u32 beacon_silence_rssi_b; + u32 beacon_silence_rssi_c; + u32 beacon_energy_a; + u32 beacon_energy_b; + u32 beacon_energy_c; +}; + +void iwl4965_calib_free_results(struct iwl_priv *priv) +{ + int i; + + for (i = 0; i < IWL_CALIB_MAX; i++) { + kfree(priv->calib_results[i].buf); + priv->calib_results[i].buf = NULL; + priv->calib_results[i].buf_len = 0; + } +} + +/***************************************************************************** + * RUNTIME calibrations framework + *****************************************************************************/ + +/* "false alarms" are signals that our DSP tries to lock onto, + * but then determines that they are either noise, or transmissions + * from a distant wireless network (also "noise", really) that get + * "stepped on" by stronger transmissions within our own network. + * This algorithm attempts to set a sensitivity level that is high + * enough to receive all of our own network traffic, but not so + * high that our DSP gets too busy trying to lock onto non-network + * activity/noise. 
*/ +static int iwl4965_sens_energy_cck(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time, + struct statistics_general_data *rx_info) +{ + u32 max_nrg_cck = 0; + int i = 0; + u8 max_silence_rssi = 0; + u32 silence_ref = 0; + u8 silence_rssi_a = 0; + u8 silence_rssi_b = 0; + u8 silence_rssi_c = 0; + u32 val; + + /* "false_alarms" values below are cross-multiplications to assess the + * numbers of false alarms within the measured period of actual Rx + * (Rx is off when we're txing), vs the min/max expected false alarms + * (some should be expected if rx is sensitive enough) in a + * hypothetical listening period of 200 time units (TU), 204.8 msec: + * + * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time + * + * */ + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; + u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + data = &(priv->sensitivity_data); + + data->nrg_auto_corr_silence_diff = 0; + + /* Find max silence rssi among all 3 receivers. + * This is background noise, which may include transmissions from other + * networks, measured during silence before our network's beacon */ + silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a & + ALL_BAND_FILTER) >> 8); + silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b & + ALL_BAND_FILTER) >> 8); + silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c & + ALL_BAND_FILTER) >> 8); + + val = max(silence_rssi_b, silence_rssi_c); + max_silence_rssi = max(silence_rssi_a, (u8) val); + + /* Store silence rssi in 20-beacon history table */ + data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi; + data->nrg_silence_idx++; + if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L) + data->nrg_silence_idx = 0; + + /* Find max silence rssi across 20 beacon history */ + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) { + val = data->nrg_silence_rssi[i]; + silence_ref = max(silence_ref, val); + } + IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n", + silence_rssi_a, silence_rssi_b, silence_rssi_c, + silence_ref); + + /* Find max rx energy (min value!) among all 3 receivers, + * measured during beacon frame. + * Save it in 10-beacon history table. */ + i = data->nrg_energy_idx; + val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c); + data->nrg_value[i] = min(rx_info->beacon_energy_a, val); + + data->nrg_energy_idx++; + if (data->nrg_energy_idx >= 10) + data->nrg_energy_idx = 0; + + /* Find min rx energy (max value) across 10 beacon history. + * This is the minimum signal level that we want to receive well. + * Add backoff (margin so we don't miss slightly lower energy frames). + * This establishes an upper bound (min value) for energy threshold. */ + max_nrg_cck = data->nrg_value[0]; + for (i = 1; i < 10; i++) + max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i])); + max_nrg_cck += 6; + + IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n", + rx_info->beacon_energy_a, rx_info->beacon_energy_b, + rx_info->beacon_energy_c, max_nrg_cck - 6); + + /* Count number of consecutive beacons with fewer-than-desired + * false alarms. 
*/ + if (false_alarms < min_false_alarms) + data->num_in_cck_no_fa++; + else + data->num_in_cck_no_fa = 0; + IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n", + data->num_in_cck_no_fa); + + /* If we got too many false alarms this time, reduce sensitivity */ + if ((false_alarms > max_false_alarms) && + (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) { + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n", + false_alarms, max_false_alarms); + IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n"); + data->nrg_curr_state = IWL_FA_TOO_MANY; + /* Store for "fewer than desired" on later beacon */ + data->nrg_silence_ref = silence_ref; + + /* increase energy threshold (reduce nrg value) + * to decrease sensitivity */ + data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK; + /* Else if we got fewer than desired, increase sensitivity */ + } else if (false_alarms < min_false_alarms) { + data->nrg_curr_state = IWL_FA_TOO_FEW; + + /* Compare silence level with silence level for most recent + * healthy number or too many false alarms */ + data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref - + (s32)silence_ref; + + IWL_DEBUG_CALIB(priv, + "norm FA %u < min FA %u, silence diff %d\n", + false_alarms, min_false_alarms, + data->nrg_auto_corr_silence_diff); + + /* Increase value to increase sensitivity, but only if: + * 1a) previous beacon did *not* have *too many* false alarms + * 1b) AND there's a significant difference in Rx levels + * from a previous beacon with too many, or healthy # FAs + * OR 2) We've seen a lot of beacons (100) with too few + * false alarms */ + if ((data->nrg_prev_state != IWL_FA_TOO_MANY) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n"); + /* Increase nrg value to increase sensitivity */ + val = data->nrg_th_cck + NRG_STEP_CCK; + data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val); + } else { + IWL_DEBUG_CALIB(priv, + "... but not changing sensitivity\n"); + } + + /* Else we got a healthy number of false alarms, keep status quo */ + } else { + IWL_DEBUG_CALIB(priv, " FA in safe zone\n"); + data->nrg_curr_state = IWL_FA_GOOD_RANGE; + + /* Store for use in "fewer than desired" with later beacon */ + data->nrg_silence_ref = silence_ref; + + /* If previous beacon had too many false alarms, + * give it some extra margin by reducing sensitivity again + * (but don't go below measured energy of desired Rx) */ + if (IWL_FA_TOO_MANY == data->nrg_prev_state) { + IWL_DEBUG_CALIB(priv, "... increasing margin\n"); + if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN)) + data->nrg_th_cck -= NRG_MARGIN; + else + data->nrg_th_cck = max_nrg_cck; + } + } + + /* Make sure the energy threshold does not go above the measured + * energy of the desired Rx signals (reduced by backoff margin), + * or else we might start missing Rx frames. + * Lower value is higher energy, so we use max()! 
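[Aside, not part of the patch] The decision logic above hinges on the cross-multiplication described near the top of iwl4965_sens_energy_cck(). A minimal standalone sketch of that classification (threshold values are illustrative, not the driver's constants) shows how the observed count is scaled to a 200 TU (204.8 ms) listening window and bucketed into too-many / too-few / in-range:

/* Standalone illustration of the false-alarm normalisation used by the
 * CCK sensitivity algorithm.  Threshold values are made up for the example. */
#include <stdint.h>
#include <stdio.h>

#define MIN_FA_CCK	5	/* illustrative: fewest FAs expected per unit */
#define MAX_FA_CCK	50	/* illustrative: most FAs tolerated per unit */

enum fa_state { FA_TOO_MANY, FA_TOO_FEW, FA_GOOD_RANGE };

/* norm_fa:        false alarms + bad-PLCP events since the last beacon
 * rx_enable_time: actual receive time (usec) over the same interval   */
static enum fa_state classify_fa_cck(uint32_t norm_fa, uint32_t rx_enable_time)
{
	/* Scale the observed count to a hypothetical 200 TU (200 * 1024 usec)
	 * listening window, so that
	 *     MIN_FA/unit  <  false_alarms/rx_time  <  MAX_FA/unit
	 * becomes a pair of integer cross-multiplications. */
	uint32_t false_alarms     = norm_fa * 200 * 1024;
	uint32_t max_false_alarms = MAX_FA_CCK * rx_enable_time;
	uint32_t min_false_alarms = MIN_FA_CCK * rx_enable_time;

	if (false_alarms > max_false_alarms)
		return FA_TOO_MANY;	/* desensitise: raise energy threshold */
	if (false_alarms < min_false_alarms)
		return FA_TOO_FEW;	/* may sensitise: lower energy threshold */
	return FA_GOOD_RANGE;		/* keep the current settings */
}

int main(void)
{
	/* e.g. 10 aborted signal locks during 50 ms of actual receive time */
	printf("state = %d\n", classify_fa_cck(10, 50000));
	return 0;
}

Depending on the bucket, the driver code above then nudges nrg_th_cck and the CCK auto-correlation values in the corresponding direction.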
+ */ + data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck); + IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck); + + data->nrg_prev_state = data->nrg_curr_state; + + /* Auto-correlation CCK algorithm */ + if (false_alarms > min_false_alarms) { + + /* increase auto_corr values to decrease sensitivity + * so the DSP won't be disturbed by the noise + */ + if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) + data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1; + else { + val = data->auto_corr_cck + AUTO_CORR_STEP_CCK; + data->auto_corr_cck = + min((u32)ranges->auto_corr_max_cck, val); + } + val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + min((u32)ranges->auto_corr_max_cck_mrc, val); + } else if ((false_alarms < min_false_alarms) && + ((data->nrg_auto_corr_silence_diff > NRG_DIFF) || + (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) { + + /* Decrease auto_corr values to increase sensitivity */ + val = data->auto_corr_cck - AUTO_CORR_STEP_CCK; + data->auto_corr_cck = + max((u32)ranges->auto_corr_min_cck, val); + val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK; + data->auto_corr_cck_mrc = + max((u32)ranges->auto_corr_min_cck_mrc, val); + } + + return 0; +} + + +static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv, + u32 norm_fa, + u32 rx_enable_time) +{ + u32 val; + u32 false_alarms = norm_fa * 200 * 1024; + u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; + u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + data = &(priv->sensitivity_data); + + /* If we got too many false alarms this time, reduce sensitivity */ + if (false_alarms > max_false_alarms) { + + IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n", + false_alarms, max_false_alarms); + + val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + min((u32)ranges->auto_corr_max_ofdm, val); + + val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + min((u32)ranges->auto_corr_max_ofdm_mrc, val); + + val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + min((u32)ranges->auto_corr_max_ofdm_x1, val); + + val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val); + } + + /* Else if we got fewer than desired, increase sensitivity */ + else if (false_alarms < min_false_alarms) { + + IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n", + false_alarms, min_false_alarms); + + val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm = + max((u32)ranges->auto_corr_min_ofdm, val); + + val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc = + max((u32)ranges->auto_corr_min_ofdm_mrc, val); + + val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_x1 = + max((u32)ranges->auto_corr_min_ofdm_x1, val); + + val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM; + data->auto_corr_ofdm_mrc_x1 = + max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val); + } else { + IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n", + min_false_alarms, false_alarms, max_false_alarms); + } + return 0; +} + +static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv, + struct iwl_sensitivity_data *data, + __le16 *tbl) +{ + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm); + tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] = + 
cpu_to_le16((u16)data->auto_corr_ofdm_mrc); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_x1); + tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1); + + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck); + tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16((u16)data->auto_corr_cck_mrc); + + tbl[HD_MIN_ENERGY_CCK_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_cck); + tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] = + cpu_to_le16((u16)data->nrg_th_ofdm); + + tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = + cpu_to_le16(data->barker_corr_th_min); + tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = + cpu_to_le16(data->barker_corr_th_min_mrc); + tbl[HD_OFDM_ENERGY_TH_IN_INDEX] = + cpu_to_le16(data->nrg_th_cca); + + IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", + data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, + data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1, + data->nrg_th_ofdm); + + IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n", + data->auto_corr_cck, data->auto_corr_cck_mrc, + data->nrg_th_cck); +} + +/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ +static int iwl4965_sensitivity_write(struct iwl_priv *priv) +{ + struct iwl_sensitivity_cmd cmd; + struct iwl_sensitivity_data *data = NULL; + struct iwl_host_cmd cmd_out = { + .id = SENSITIVITY_CMD, + .len = sizeof(struct iwl_sensitivity_cmd), + .flags = CMD_ASYNC, + .data = &cmd, + }; + + data = &(priv->sensitivity_data); + + memset(&cmd, 0, sizeof(cmd)); + + iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]); + + /* Update uCode's "work" table, and copy it to DSP */ + cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE; + + /* Don't send command to uCode if nothing has changed */ + if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]), + sizeof(u16)*HD_TABLE_SIZE)) { + IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n"); + return 0; + } + + /* Copy table for comparison next time */ + memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), + sizeof(u16)*HD_TABLE_SIZE); + + return iwl_legacy_send_cmd(priv, &cmd_out); +} + +void iwl4965_init_sensitivity(struct iwl_priv *priv) +{ + int ret = 0; + int i; + struct iwl_sensitivity_data *data = NULL; + const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; + + if (priv->disable_sens_cal) + return; + + IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n"); + + /* Clear driver's sensitivity algo data */ + data = &(priv->sensitivity_data); + + if (ranges == NULL) + return; + + memset(data, 0, sizeof(struct iwl_sensitivity_data)); + + data->num_in_cck_no_fa = 0; + data->nrg_curr_state = IWL_FA_TOO_MANY; + data->nrg_prev_state = IWL_FA_TOO_MANY; + data->nrg_silence_ref = 0; + data->nrg_silence_idx = 0; + data->nrg_energy_idx = 0; + + for (i = 0; i < 10; i++) + data->nrg_value[i] = 0; + + for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) + data->nrg_silence_rssi[i] = 0; + + data->auto_corr_ofdm = ranges->auto_corr_min_ofdm; + data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; + data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; + data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; + data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF; + data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; + data->nrg_th_cck = ranges->nrg_th_cck; + data->nrg_th_ofdm = ranges->nrg_th_ofdm; + data->barker_corr_th_min = ranges->barker_corr_th_min; + data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc; + data->nrg_th_cca 
= ranges->nrg_th_cca; + + data->last_bad_plcp_cnt_ofdm = 0; + data->last_fa_cnt_ofdm = 0; + data->last_bad_plcp_cnt_cck = 0; + data->last_fa_cnt_cck = 0; + + ret |= iwl4965_sensitivity_write(priv); + IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret); +} + +void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp) +{ + u32 rx_enable_time; + u32 fa_cck; + u32 fa_ofdm; + u32 bad_plcp_cck; + u32 bad_plcp_ofdm; + u32 norm_fa_ofdm; + u32 norm_fa_cck; + struct iwl_sensitivity_data *data = NULL; + struct statistics_rx_non_phy *rx_info; + struct statistics_rx_phy *ofdm, *cck; + unsigned long flags; + struct statistics_general_data statis; + + if (priv->disable_sens_cal) + return; + + data = &(priv->sensitivity_data); + + if (!iwl_legacy_is_any_associated(priv)) { + IWL_DEBUG_CALIB(priv, "<< - not associated\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + + rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general); + ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm); + cck = &(((struct iwl_notif_statistics *)resp)->rx.cck); + + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* Extract Statistics: */ + rx_enable_time = le32_to_cpu(rx_info->channel_load); + fa_cck = le32_to_cpu(cck->false_alarm_cnt); + fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt); + bad_plcp_cck = le32_to_cpu(cck->plcp_err); + bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err); + + statis.beacon_silence_rssi_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a); + statis.beacon_silence_rssi_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b); + statis.beacon_silence_rssi_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c); + statis.beacon_energy_a = + le32_to_cpu(rx_info->beacon_energy_a); + statis.beacon_energy_b = + le32_to_cpu(rx_info->beacon_energy_b); + statis.beacon_energy_c = + le32_to_cpu(rx_info->beacon_energy_c); + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); + + if (!rx_enable_time) { + IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n"); + return; + } + + /* These statistics increase monotonically, and do not reset + * at each beacon. Calculate difference from last value, or just + * use the new statistics value if it has reset or wrapped around. 
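[Aside, not part of the patch] The four counter updates that follow all apply the same idiom; as a standalone sketch (helper name hypothetical), a cumulative firmware counter is turned into a per-interval delta, with the baseline simply restarted if the counter has reset or wrapped:

#include <stdint.h>

/* 'last' is the driver's remembered cumulative value; 'current' is the
 * value just reported by the uCode. */
static uint32_t counter_delta(uint32_t *last, uint32_t current)
{
	uint32_t delta;

	if (*last > current) {
		/* Counter reset or wrapped: take the new value as this
		 * interval's count and restart the baseline from it. */
		*last = current;
		return current;
	}

	delta = current - *last;	/* normal case: difference from last */
	*last = current;		/* remember the new cumulative value */
	return delta;
}

fa_cck, fa_ofdm and the two bad-PLCP counters below each go through exactly this shape of update before being summed into the normalized false-alarm counts.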
*/ + if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) + data->last_bad_plcp_cnt_cck = bad_plcp_cck; + else { + bad_plcp_cck -= data->last_bad_plcp_cnt_cck; + data->last_bad_plcp_cnt_cck += bad_plcp_cck; + } + + if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) + data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm; + else { + bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm; + data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm; + } + + if (data->last_fa_cnt_ofdm > fa_ofdm) + data->last_fa_cnt_ofdm = fa_ofdm; + else { + fa_ofdm -= data->last_fa_cnt_ofdm; + data->last_fa_cnt_ofdm += fa_ofdm; + } + + if (data->last_fa_cnt_cck > fa_cck) + data->last_fa_cnt_cck = fa_cck; + else { + fa_cck -= data->last_fa_cnt_cck; + data->last_fa_cnt_cck += fa_cck; + } + + /* Total aborted signal locks */ + norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm; + norm_fa_cck = fa_cck + bad_plcp_cck; + + IWL_DEBUG_CALIB(priv, + "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck, + bad_plcp_cck, fa_ofdm, bad_plcp_ofdm); + + iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); + iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); + + iwl4965_sensitivity_write(priv); +} + +static inline u8 iwl4965_find_first_chain(u8 mask) +{ + if (mask & ANT_A) + return CHAIN_A; + if (mask & ANT_B) + return CHAIN_B; + return CHAIN_C; +} + +/** + * Run disconnected antenna algorithm to find out which antennas are + * disconnected. + */ +static void +iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig, + struct iwl_chain_noise_data *data) +{ + u32 active_chains = 0; + u32 max_average_sig; + u16 max_average_sig_antenna_i; + u8 num_tx_chains; + u8 first_chain; + u16 i = 0; + + average_sig[0] = data->chain_signal_a / + priv->cfg->base_params->chain_noise_num_beacons; + average_sig[1] = data->chain_signal_b / + priv->cfg->base_params->chain_noise_num_beacons; + average_sig[2] = data->chain_signal_c / + priv->cfg->base_params->chain_noise_num_beacons; + + if (average_sig[0] >= average_sig[1]) { + max_average_sig = average_sig[0]; + max_average_sig_antenna_i = 0; + active_chains = (1 << max_average_sig_antenna_i); + } else { + max_average_sig = average_sig[1]; + max_average_sig_antenna_i = 1; + active_chains = (1 << max_average_sig_antenna_i); + } + + if (average_sig[2] >= max_average_sig) { + max_average_sig = average_sig[2]; + max_average_sig_antenna_i = 2; + active_chains = (1 << max_average_sig_antenna_i); + } + + IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n", + average_sig[0], average_sig[1], average_sig[2]); + IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n", + max_average_sig, max_average_sig_antenna_i); + + /* Compare signal strengths for all 3 receivers. */ + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (i != max_average_sig_antenna_i) { + s32 rssi_delta = (max_average_sig - average_sig[i]); + + /* If signal is very weak, compared with + * strongest, mark it as disconnected. */ + if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS) + data->disconn_array[i] = 1; + else + active_chains |= (1 << i); + IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d " + "disconn_array[i] = %d\n", + i, rssi_delta, data->disconn_array[i]); + } + } + + /* + * The above algorithm sometimes fails when the ucode + * reports 0 for all chains. It's not clear why that + * happens to start with, but it is then causing trouble + * because this can make us enable more chains than the + * hardware really has. + * + * To be safe, simply mask out any chains that we know + * are not on the device. 
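[Aside, not part of the patch] Summarised as a standalone sketch (threshold and names illustrative), the disconnected-antenna decision above, together with the masking that follows, is: average each chain's signal over the accumulation window, keep the strongest chain, keep any other chain within the allowed pathloss of it, then mask the result with the antennas the hardware actually has:

#include <stdint.h>

#define NUM_RX_CHAINS			3
#define MAXIMUM_ALLOWED_PATHLOSS	15	/* illustrative threshold */

/* avg_sig[]:  per-chain signal averaged over the beacon window
 * valid_ant:  bitmask of RX antennas the hardware really has
 * returns:    bitmask of chains considered connected            */
static uint32_t find_active_chains(const uint32_t avg_sig[NUM_RX_CHAINS],
				   uint32_t valid_ant)
{
	uint32_t max_sig = 0;
	uint32_t active;
	int strongest = 0;
	int i;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		if (avg_sig[i] >= max_sig) {
			max_sig = avg_sig[i];
			strongest = i;
		}
	}
	active = 1u << strongest;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		if (i == strongest)
			continue;
		/* A chain far below the strongest is treated as disconnected;
		 * anything within the allowed pathloss stays active. */
		if (max_sig - avg_sig[i] <= MAXIMUM_ALLOWED_PATHLOSS)
			active |= 1u << i;
	}

	/* The uCode sometimes reports 0 for every chain, so never claim
	 * more antennas than the device actually has. */
	return active & valid_ant;
}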
+ */ + active_chains &= priv->hw_params.valid_rx_ant; + + num_tx_chains = 0; + for (i = 0; i < NUM_RX_CHAINS; i++) { + /* loops on all the bits of + * priv->hw_setting.valid_tx_ant */ + u8 ant_msk = (1 << i); + if (!(priv->hw_params.valid_tx_ant & ant_msk)) + continue; + + num_tx_chains++; + if (data->disconn_array[i] == 0) + /* there is a Tx antenna connected */ + break; + if (num_tx_chains == priv->hw_params.tx_chains_num && + data->disconn_array[i]) { + /* + * If all chains are disconnected + * connect the first valid tx chain + */ + first_chain = + iwl4965_find_first_chain(priv->cfg->valid_tx_ant); + data->disconn_array[first_chain] = 0; + active_chains |= BIT(first_chain); + IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \ + W/A - declare %d as connected\n", + first_chain); + break; + } + } + + if (active_chains != priv->hw_params.valid_rx_ant && + active_chains != priv->chain_noise_data.active_chains) + IWL_DEBUG_CALIB(priv, + "Detected that not all antennas are connected! " + "Connected: %#x, valid: %#x.\n", + active_chains, priv->hw_params.valid_rx_ant); + + /* Save for use within RXON, TX, SCAN commands, etc. */ + data->active_chains = active_chains; + IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n", + active_chains); +} + +static void iwl4965_gain_computation(struct iwl_priv *priv, + u32 *average_noise, + u16 min_average_noise_antenna_i, + u32 min_average_noise, + u8 default_chain) +{ + int i, ret; + struct iwl_chain_noise_data *data = &priv->chain_noise_data; + + data->delta_gain_code[min_average_noise_antenna_i] = 0; + + for (i = default_chain; i < NUM_RX_CHAINS; i++) { + s32 delta_g = 0; + + if (!(data->disconn_array[i]) && + (data->delta_gain_code[i] == + CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { + delta_g = average_noise[i] - min_average_noise; + data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); + data->delta_gain_code[i] = + min(data->delta_gain_code[i], + (u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + + data->delta_gain_code[i] = + (data->delta_gain_code[i] | (1 << 2)); + } else { + data->delta_gain_code[i] = 0; + } + } + IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", + data->delta_gain_code[0], + data->delta_gain_code[1], + data->delta_gain_code[2]); + + /* Differential gain gets sent to uCode only once */ + if (!data->radio_write) { + struct iwl_calib_diff_gain_cmd cmd; + data->radio_write = 1; + + memset(&cmd, 0, sizeof(cmd)); + cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; + cmd.diff_gain_a = data->delta_gain_code[0]; + cmd.diff_gain_b = data->delta_gain_code[1]; + cmd.diff_gain_c = data->delta_gain_code[2]; + ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + sizeof(cmd), &cmd); + if (ret) + IWL_DEBUG_CALIB(priv, "fail sending cmd " + "REPLY_PHY_CALIBRATION_CMD\n"); + + /* TODO we might want recalculate + * rx_chain in rxon cmd */ + + /* Mark so we run this algo only once! */ + data->state = IWL_CHAIN_NOISE_CALIBRATED; + } +} + + + +/* + * Accumulate 16 beacons of signal and noise statistics for each of + * 3 receivers/antennas/rx-chains, then figure out: + * 1) Which antennas are connected. + * 2) Differential rx gain settings to balance the 3 receivers. 
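[Aside, not part of the patch] Step 2) is what iwl4965_gain_computation() above performs. In isolation, the mapping from a connected (non-reference) chain's excess noise to a differential-gain code is roughly the following sketch of the arithmetic only; the clamp value is illustrative and the meaning of bit 2 is hardware specific:

#include <stdint.h>

#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE	3	/* illustrative clamp */

/* chain_noise: this chain's average silence noise over the window
 * min_noise:   the quietest connected chain's average noise
 * Roughly one gain code per 1.5 noise units, clamped, with bit 2 set
 * as in the driver code above. */
static uint8_t delta_gain_code(uint32_t chain_noise, uint32_t min_noise)
{
	uint32_t delta_g = chain_noise - min_noise;
	uint8_t code = (uint8_t)((delta_g * 10) / 15);

	if (code > CHAIN_NOISE_MAX_DELTA_GAIN_CODE)
		code = CHAIN_NOISE_MAX_DELTA_GAIN_CODE;

	return code | (1 << 2);
}

The quietest chain keeps a code of 0 and disconnected chains are zeroed, as in the function above; the resulting three codes are sent once via REPLY_PHY_CALIBRATION_CMD.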
+ */ +void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) +{ + struct iwl_chain_noise_data *data = NULL; + + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_sig_a; + u32 chain_sig_b; + u32 chain_sig_c; + u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; + u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; + u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; + u16 i = 0; + u16 rxon_chnum = INITIALIZATION_VALUE; + u16 stat_chnum = INITIALIZATION_VALUE; + u8 rxon_band24; + u8 stat_band24; + unsigned long flags; + struct statistics_rx_non_phy *rx_info; + + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (priv->disable_chain_noise_cal) + return; + + data = &(priv->chain_noise_data); + + /* + * Accumulate just the first "chain_noise_num_beacons" after + * the first association, then we're done forever. + */ + if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { + if (data->state == IWL_CHAIN_NOISE_ALIVE) + IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + + rx_info = &(((struct iwl_notif_statistics *)stat_resp)-> + rx.general); + + if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { + IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); + rxon_chnum = le16_to_cpu(ctx->staging.channel); + + stat_band24 = !!(((struct iwl_notif_statistics *) + stat_resp)->flag & + STATISTICS_REPLY_FLG_BAND_24G_MSK); + stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *) + stat_resp)->flag) >> 16; + + /* Make sure we accumulate data for just the associated channel + * (even if scanning). 
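[Aside, not part of the patch] The check that follows compares the channel/band encoded in the statistics 'flag' word against the staging RXON so that samples taken while scanning other channels are ignored. Decoded on its own (mask value illustrative, field layout as used below), it amounts to:

#include <stdint.h>

#define STAT_FLG_BAND_24G	(1u << 1)	/* illustrative mask */

struct stat_hdr  { uint32_t flag; };		/* flag already in CPU order */
struct rxon_info { uint16_t channel; int band24; };

/* Accumulate a statistics notification only if it refers to the channel
 * and band currently programmed in RXON. */
static int stats_match_rxon(const struct stat_hdr *s, const struct rxon_info *r)
{
	int      stat_band24 = !!(s->flag & STAT_FLG_BAND_24G);
	uint16_t stat_chnum  = (uint16_t)(s->flag >> 16);	/* channel in upper 16 bits */

	return stat_chnum == r->channel && stat_band24 == r->band24;
}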
*/ + if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { + IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", + rxon_chnum, rxon_band24); + spin_unlock_irqrestore(&priv->lock, flags); + return; + } + + /* + * Accumulate beacon statistics values across + * "chain_noise_num_beacons" + */ + chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & + IN_BAND_FILTER; + chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & + IN_BAND_FILTER; + chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) & + IN_BAND_FILTER; + + chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER; + chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; + chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; + + spin_unlock_irqrestore(&priv->lock, flags); + + data->beacon_count++; + + data->chain_noise_a = (chain_noise_a + data->chain_noise_a); + data->chain_noise_b = (chain_noise_b + data->chain_noise_b); + data->chain_noise_c = (chain_noise_c + data->chain_noise_c); + + data->chain_signal_a = (chain_sig_a + data->chain_signal_a); + data->chain_signal_b = (chain_sig_b + data->chain_signal_b); + data->chain_signal_c = (chain_sig_c + data->chain_signal_c); + + IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n", + rxon_chnum, rxon_band24, data->beacon_count); + IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n", + chain_sig_a, chain_sig_b, chain_sig_c); + IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", + chain_noise_a, chain_noise_b, chain_noise_c); + + /* If this is the "chain_noise_num_beacons", determine: + * 1) Disconnected antennas (using signal strengths) + * 2) Differential gain (using silence noise) to balance receivers */ + if (data->beacon_count != + priv->cfg->base_params->chain_noise_num_beacons) + return; + + /* Analyze signal for disconnected antenna */ + iwl4965_find_disconn_antenna(priv, average_sig, data); + + /* Analyze noise for rx balance */ + average_noise[0] = data->chain_noise_a / + priv->cfg->base_params->chain_noise_num_beacons; + average_noise[1] = data->chain_noise_b / + priv->cfg->base_params->chain_noise_num_beacons; + average_noise[2] = data->chain_noise_c / + priv->cfg->base_params->chain_noise_num_beacons; + + for (i = 0; i < NUM_RX_CHAINS; i++) { + if (!(data->disconn_array[i]) && + (average_noise[i] <= min_average_noise)) { + /* This means that chain i is active and has + * lower noise values so far: */ + min_average_noise = average_noise[i]; + min_average_noise_antenna_i = i; + } + } + + IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n", + average_noise[0], average_noise[1], + average_noise[2]); + + IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n", + min_average_noise, min_average_noise_antenna_i); + + iwl4965_gain_computation(priv, average_noise, + min_average_noise_antenna_i, min_average_noise, + iwl4965_find_first_chain(priv->cfg->valid_rx_ant)); + + /* Some power changes may have been made during the calibration. 
+ * Update and commit the RXON + */ + if (priv->cfg->ops->lib->update_chain_flags) + priv->cfg->ops->lib->update_chain_flags(priv); + + data->state = IWL_CHAIN_NOISE_DONE; + iwl_legacy_power_update_mode(priv, false); +} + +void iwl4965_reset_run_time_calib(struct iwl_priv *priv) +{ + int i; + memset(&(priv->sensitivity_data), 0, + sizeof(struct iwl_sensitivity_data)); + memset(&(priv->chain_noise_data), 0, + sizeof(struct iwl_chain_noise_data)); + for (i = 0; i < NUM_RX_CHAINS; i++) + priv->chain_noise_data.delta_gain_code[i] = + CHAIN_NOISE_DELTA_GAIN_INIT_VAL; + + /* Ask for statistics now, the uCode will send notification + * periodically after association */ + iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h index 9f7b2f935964..f46c80e6e005 100644 --- a/drivers/net/wireless/iwlwifi/iwl-legacy.h +++ b/drivers/net/wireless/iwlegacy/iwl-4965-calib.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -59,21 +59,17 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *****************************************************************************/ +#ifndef __iwl_4965_calib_h__ +#define __iwl_4965_calib_h__ -#ifndef __iwl_legacy_h__ -#define __iwl_legacy_h__ +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-commands.h" -/* mac80211 handlers */ -int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed); -void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw); -void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes); -void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags); +void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp); +void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp); +void iwl4965_init_sensitivity(struct iwl_priv *priv); +void iwl4965_reset_run_time_calib(struct iwl_priv *priv); +void iwl4965_calib_free_results(struct iwl_priv *priv); -irqreturn_t iwl_isr_legacy(int irq, void *data); - -#endif /* __iwl_legacy_h__ */ +#endif /* __iwl_4965_calib_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c new file mode 100644 index 000000000000..1c93665766e4 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c @@ -0,0 +1,774 @@ +/****************************************************************************** +* +* GPL LICENSE SUMMARY +* +* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of version 2 of the GNU General Public License as +* published by the Free Software Foundation. 
+* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* General Public License for more details. +* +* You should have received a copy of the GNU General Public License +* along with this program; if not, write to the Free Software +* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, +* USA +* +* The full GNU General Public License is included in this distribution +* in the file called LICENSE.GPL. +* +* Contact Information: +* Intel Linux Wireless <ilw@linux.intel.com> +* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +*****************************************************************************/ +#include "iwl-4965.h" +#include "iwl-4965-debugfs.h" + +static const char *fmt_value = " %-30s %10u\n"; +static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; +static const char *fmt_header = + "%-32s current cumulative delta max\n"; + +static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) +{ + int p = 0; + u32 flag; + + flag = le32_to_cpu(priv->_4965.statistics.flag); + + p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag); + if (flag & UCODE_STATISTICS_CLEAR_MSK) + p += scnprintf(buf + p, bufsz - p, + "\tStatistics have been cleared\n"); + p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n", + (flag & UCODE_STATISTICS_FREQUENCY_MSK) + ? "2.4 GHz" : "5.2 GHz"); + p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n", + (flag & UCODE_STATISTICS_NARROW_BAND_MSK) + ? "enabled" : "disabled"); + + return p; +} + +ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_rx_phy) * 40 + + sizeof(struct statistics_rx_non_phy) * 40 + + sizeof(struct statistics_rx_ht_phy) * 40 + 400; + ssize_t ret; + struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm; + struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck; + struct statistics_rx_non_phy *general, *accum_general; + struct statistics_rx_non_phy *delta_general, *max_general; + struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* + * the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + ofdm = &priv->_4965.statistics.rx.ofdm; + cck = &priv->_4965.statistics.rx.cck; + general = &priv->_4965.statistics.rx.general; + ht = &priv->_4965.statistics.rx.ofdm_ht; + accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm; + accum_cck = &priv->_4965.accum_statistics.rx.cck; + accum_general = &priv->_4965.accum_statistics.rx.general; + accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht; + delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm; + delta_cck = &priv->_4965.delta_statistics.rx.cck; + delta_general = &priv->_4965.delta_statistics.rx.general; + delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht; + max_ofdm = &priv->_4965.max_delta.rx.ofdm; + max_cck = &priv->_4965.max_delta.rx.cck; + max_general = &priv->_4965.max_delta.rx.general; + max_ht = &priv->_4965.max_delta.rx.ofdm_ht; + + pos += iwl4965_statistics_flag(priv, buf, 
bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(ofdm->ina_cnt), + accum_ofdm->ina_cnt, + delta_ofdm->ina_cnt, max_ofdm->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt, + delta_ofdm->fina_cnt, max_ofdm->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err, + delta_ofdm->plcp_err, max_ofdm->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err, + delta_ofdm->crc32_err, max_ofdm->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ofdm->overrun_err), + accum_ofdm->overrun_err, delta_ofdm->overrun_err, + max_ofdm->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ofdm->early_overrun_err), + accum_ofdm->early_overrun_err, + delta_ofdm->early_overrun_err, + max_ofdm->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ofdm->crc32_good), + accum_ofdm->crc32_good, delta_ofdm->crc32_good, + max_ofdm->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + le32_to_cpu(ofdm->false_alarm_cnt), + accum_ofdm->false_alarm_cnt, + delta_ofdm->false_alarm_cnt, + max_ofdm->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(ofdm->fina_sync_err_cnt), + accum_ofdm->fina_sync_err_cnt, + delta_ofdm->fina_sync_err_cnt, + max_ofdm->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(ofdm->sfd_timeout), + accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout, + max_ofdm->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(ofdm->fina_timeout), + accum_ofdm->fina_timeout, delta_ofdm->fina_timeout, + max_ofdm->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(ofdm->unresponded_rts), + accum_ofdm->unresponded_rts, + delta_ofdm->unresponded_rts, + max_ofdm->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(ofdm->rxe_frame_limit_overrun), + accum_ofdm->rxe_frame_limit_overrun, + delta_ofdm->rxe_frame_limit_overrun, + max_ofdm->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(ofdm->sent_ack_cnt), + accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt, + max_ofdm->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(ofdm->sent_cts_cnt), + accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt, + max_ofdm->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(ofdm->sent_ba_rsp_cnt), + accum_ofdm->sent_ba_rsp_cnt, + delta_ofdm->sent_ba_rsp_cnt, + max_ofdm->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(ofdm->dsp_self_kill), + accum_ofdm->dsp_self_kill, + delta_ofdm->dsp_self_kill, + max_ofdm->dsp_self_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ofdm->mh_format_err), + accum_ofdm->mh_format_err, + delta_ofdm->mh_format_err, + 
max_ofdm->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(ofdm->re_acq_main_rssi_sum), + accum_ofdm->re_acq_main_rssi_sum, + delta_ofdm->re_acq_main_rssi_sum, + max_ofdm->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - CCK:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_cnt:", + le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt, + delta_cck->ina_cnt, max_cck->ina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_cnt:", + le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt, + delta_cck->fina_cnt, max_cck->fina_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(cck->plcp_err), accum_cck->plcp_err, + delta_cck->plcp_err, max_cck->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(cck->crc32_err), accum_cck->crc32_err, + delta_cck->crc32_err, max_cck->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(cck->overrun_err), + accum_cck->overrun_err, delta_cck->overrun_err, + max_cck->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(cck->early_overrun_err), + accum_cck->early_overrun_err, + delta_cck->early_overrun_err, + max_cck->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(cck->crc32_good), accum_cck->crc32_good, + delta_cck->crc32_good, max_cck->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "false_alarm_cnt:", + le32_to_cpu(cck->false_alarm_cnt), + accum_cck->false_alarm_cnt, + delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_sync_err_cnt:", + le32_to_cpu(cck->fina_sync_err_cnt), + accum_cck->fina_sync_err_cnt, + delta_cck->fina_sync_err_cnt, + max_cck->fina_sync_err_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sfd_timeout:", + le32_to_cpu(cck->sfd_timeout), + accum_cck->sfd_timeout, delta_cck->sfd_timeout, + max_cck->sfd_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "fina_timeout:", + le32_to_cpu(cck->fina_timeout), + accum_cck->fina_timeout, delta_cck->fina_timeout, + max_cck->fina_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unresponded_rts:", + le32_to_cpu(cck->unresponded_rts), + accum_cck->unresponded_rts, delta_cck->unresponded_rts, + max_cck->unresponded_rts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rxe_frame_lmt_ovrun:", + le32_to_cpu(cck->rxe_frame_limit_overrun), + accum_cck->rxe_frame_limit_overrun, + delta_cck->rxe_frame_limit_overrun, + max_cck->rxe_frame_limit_overrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ack_cnt:", + le32_to_cpu(cck->sent_ack_cnt), + accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt, + max_cck->sent_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_cts_cnt:", + le32_to_cpu(cck->sent_cts_cnt), + accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt, + max_cck->sent_cts_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sent_ba_rsp_cnt:", + le32_to_cpu(cck->sent_ba_rsp_cnt), + accum_cck->sent_ba_rsp_cnt, + delta_cck->sent_ba_rsp_cnt, + max_cck->sent_ba_rsp_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_self_kill:", + le32_to_cpu(cck->dsp_self_kill), + accum_cck->dsp_self_kill, delta_cck->dsp_self_kill, + max_cck->dsp_self_kill); + pos += 
scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(cck->mh_format_err), + accum_cck->mh_format_err, delta_cck->mh_format_err, + max_cck->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "re_acq_main_rssi_sum:", + le32_to_cpu(cck->re_acq_main_rssi_sum), + accum_cck->re_acq_main_rssi_sum, + delta_cck->re_acq_main_rssi_sum, + max_cck->re_acq_main_rssi_sum); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - GENERAL:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_cts:", + le32_to_cpu(general->bogus_cts), + accum_general->bogus_cts, delta_general->bogus_cts, + max_general->bogus_cts); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bogus_ack:", + le32_to_cpu(general->bogus_ack), + accum_general->bogus_ack, delta_general->bogus_ack, + max_general->bogus_ack); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_bssid_frames:", + le32_to_cpu(general->non_bssid_frames), + accum_general->non_bssid_frames, + delta_general->non_bssid_frames, + max_general->non_bssid_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "filtered_frames:", + le32_to_cpu(general->filtered_frames), + accum_general->filtered_frames, + delta_general->filtered_frames, + max_general->filtered_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "non_channel_beacons:", + le32_to_cpu(general->non_channel_beacons), + accum_general->non_channel_beacons, + delta_general->non_channel_beacons, + max_general->non_channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "channel_beacons:", + le32_to_cpu(general->channel_beacons), + accum_general->channel_beacons, + delta_general->channel_beacons, + max_general->channel_beacons); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "num_missed_bcon:", + le32_to_cpu(general->num_missed_bcon), + accum_general->num_missed_bcon, + delta_general->num_missed_bcon, + max_general->num_missed_bcon); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "adc_rx_saturation_time:", + le32_to_cpu(general->adc_rx_saturation_time), + accum_general->adc_rx_saturation_time, + delta_general->adc_rx_saturation_time, + max_general->adc_rx_saturation_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ina_detect_search_tm:", + le32_to_cpu(general->ina_detection_search_time), + accum_general->ina_detection_search_time, + delta_general->ina_detection_search_time, + max_general->ina_detection_search_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_a:", + le32_to_cpu(general->beacon_silence_rssi_a), + accum_general->beacon_silence_rssi_a, + delta_general->beacon_silence_rssi_a, + max_general->beacon_silence_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_b:", + le32_to_cpu(general->beacon_silence_rssi_b), + accum_general->beacon_silence_rssi_b, + delta_general->beacon_silence_rssi_b, + max_general->beacon_silence_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_silence_rssi_c:", + le32_to_cpu(general->beacon_silence_rssi_c), + accum_general->beacon_silence_rssi_c, + delta_general->beacon_silence_rssi_c, + max_general->beacon_silence_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "interference_data_flag:", + le32_to_cpu(general->interference_data_flag), + accum_general->interference_data_flag, + delta_general->interference_data_flag, + max_general->interference_data_flag); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, 
"channel_load:", + le32_to_cpu(general->channel_load), + accum_general->channel_load, + delta_general->channel_load, + max_general->channel_load); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dsp_false_alarms:", + le32_to_cpu(general->dsp_false_alarms), + accum_general->dsp_false_alarms, + delta_general->dsp_false_alarms, + max_general->dsp_false_alarms); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_a:", + le32_to_cpu(general->beacon_rssi_a), + accum_general->beacon_rssi_a, + delta_general->beacon_rssi_a, + max_general->beacon_rssi_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_b:", + le32_to_cpu(general->beacon_rssi_b), + accum_general->beacon_rssi_b, + delta_general->beacon_rssi_b, + max_general->beacon_rssi_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_rssi_c:", + le32_to_cpu(general->beacon_rssi_c), + accum_general->beacon_rssi_c, + delta_general->beacon_rssi_c, + max_general->beacon_rssi_c); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_a:", + le32_to_cpu(general->beacon_energy_a), + accum_general->beacon_energy_a, + delta_general->beacon_energy_a, + max_general->beacon_energy_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_b:", + le32_to_cpu(general->beacon_energy_b), + accum_general->beacon_energy_b, + delta_general->beacon_energy_b, + max_general->beacon_energy_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "beacon_energy_c:", + le32_to_cpu(general->beacon_energy_c), + accum_general->beacon_energy_c, + delta_general->beacon_energy_c, + max_general->beacon_energy_c); + + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Rx - OFDM_HT:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "plcp_err:", + le32_to_cpu(ht->plcp_err), accum_ht->plcp_err, + delta_ht->plcp_err, max_ht->plcp_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "overrun_err:", + le32_to_cpu(ht->overrun_err), accum_ht->overrun_err, + delta_ht->overrun_err, max_ht->overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "early_overrun_err:", + le32_to_cpu(ht->early_overrun_err), + accum_ht->early_overrun_err, + delta_ht->early_overrun_err, + max_ht->early_overrun_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_good:", + le32_to_cpu(ht->crc32_good), accum_ht->crc32_good, + delta_ht->crc32_good, max_ht->crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "crc32_err:", + le32_to_cpu(ht->crc32_err), accum_ht->crc32_err, + delta_ht->crc32_err, max_ht->crc32_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "mh_format_err:", + le32_to_cpu(ht->mh_format_err), + accum_ht->mh_format_err, + delta_ht->mh_format_err, max_ht->mh_format_err); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_crc32_good:", + le32_to_cpu(ht->agg_crc32_good), + accum_ht->agg_crc32_good, + delta_ht->agg_crc32_good, max_ht->agg_crc32_good); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_mpdu_cnt:", + le32_to_cpu(ht->agg_mpdu_cnt), + accum_ht->agg_mpdu_cnt, + delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg_cnt:", + le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt, + delta_ht->agg_cnt, max_ht->agg_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "unsupport_mcs:", + le32_to_cpu(ht->unsupport_mcs), + accum_ht->unsupport_mcs, + delta_ht->unsupport_mcs, max_ht->unsupport_mcs); + + ret = simple_read_from_buffer(user_buf, 
count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t iwl4965_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = (sizeof(struct statistics_tx) * 48) + 250; + ssize_t ret; + struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + tx = &priv->_4965.statistics.tx; + accum_tx = &priv->_4965.accum_statistics.tx; + delta_tx = &priv->_4965.delta_statistics.tx; + max_tx = &priv->_4965.max_delta.tx; + + pos += iwl4965_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_Tx:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "preamble:", + le32_to_cpu(tx->preamble_cnt), + accum_tx->preamble_cnt, + delta_tx->preamble_cnt, max_tx->preamble_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_detected_cnt:", + le32_to_cpu(tx->rx_detected_cnt), + accum_tx->rx_detected_cnt, + delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bt_prio_defer_cnt:", + le32_to_cpu(tx->bt_prio_defer_cnt), + accum_tx->bt_prio_defer_cnt, + delta_tx->bt_prio_defer_cnt, + max_tx->bt_prio_defer_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "bt_prio_kill_cnt:", + le32_to_cpu(tx->bt_prio_kill_cnt), + accum_tx->bt_prio_kill_cnt, + delta_tx->bt_prio_kill_cnt, + max_tx->bt_prio_kill_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "few_bytes_cnt:", + le32_to_cpu(tx->few_bytes_cnt), + accum_tx->few_bytes_cnt, + delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "cts_timeout:", + le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, + delta_tx->cts_timeout, max_tx->cts_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_timeout:", + le32_to_cpu(tx->ack_timeout), + accum_tx->ack_timeout, + delta_tx->ack_timeout, max_tx->ack_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "expected_ack_cnt:", + le32_to_cpu(tx->expected_ack_cnt), + accum_tx->expected_ack_cnt, + delta_tx->expected_ack_cnt, + max_tx->expected_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "actual_ack_cnt:", + le32_to_cpu(tx->actual_ack_cnt), + accum_tx->actual_ack_cnt, + delta_tx->actual_ack_cnt, + max_tx->actual_ack_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "dump_msdu_cnt:", + le32_to_cpu(tx->dump_msdu_cnt), + accum_tx->dump_msdu_cnt, + delta_tx->dump_msdu_cnt, + max_tx->dump_msdu_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_nxt_frame_mismatch:", + le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), + accum_tx->burst_abort_next_frame_mismatch_cnt, + delta_tx->burst_abort_next_frame_mismatch_cnt, + max_tx->burst_abort_next_frame_mismatch_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "abort_missing_nxt_frame:", + le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), + accum_tx->burst_abort_missing_next_frame_cnt, + delta_tx->burst_abort_missing_next_frame_cnt, + max_tx->burst_abort_missing_next_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, 
"cts_timeout_collision:", + le32_to_cpu(tx->cts_timeout_collision), + accum_tx->cts_timeout_collision, + delta_tx->cts_timeout_collision, + max_tx->cts_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "ack_ba_timeout_collision:", + le32_to_cpu(tx->ack_or_ba_timeout_collision), + accum_tx->ack_or_ba_timeout_collision, + delta_tx->ack_or_ba_timeout_collision, + max_tx->ack_or_ba_timeout_collision); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_timeout:", + le32_to_cpu(tx->agg.ba_timeout), + accum_tx->agg.ba_timeout, + delta_tx->agg.ba_timeout, + max_tx->agg.ba_timeout); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg ba_resched_frames:", + le32_to_cpu(tx->agg.ba_reschedule_frames), + accum_tx->agg.ba_reschedule_frames, + delta_tx->agg.ba_reschedule_frames, + max_tx->agg.ba_reschedule_frames); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg_frame:", + le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), + accum_tx->agg.scd_query_agg_frame_cnt, + delta_tx->agg.scd_query_agg_frame_cnt, + max_tx->agg.scd_query_agg_frame_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_no_agg:", + le32_to_cpu(tx->agg.scd_query_no_agg), + accum_tx->agg.scd_query_no_agg, + delta_tx->agg.scd_query_no_agg, + max_tx->agg.scd_query_no_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_agg:", + le32_to_cpu(tx->agg.scd_query_agg), + accum_tx->agg.scd_query_agg, + delta_tx->agg.scd_query_agg, + max_tx->agg.scd_query_agg); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg scd_query_mismatch:", + le32_to_cpu(tx->agg.scd_query_mismatch), + accum_tx->agg.scd_query_mismatch, + delta_tx->agg.scd_query_mismatch, + max_tx->agg.scd_query_mismatch); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg frame_not_ready:", + le32_to_cpu(tx->agg.frame_not_ready), + accum_tx->agg.frame_not_ready, + delta_tx->agg.frame_not_ready, + max_tx->agg.frame_not_ready); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg underrun:", + le32_to_cpu(tx->agg.underrun), + accum_tx->agg.underrun, + delta_tx->agg.underrun, max_tx->agg.underrun); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg bt_prio_kill:", + le32_to_cpu(tx->agg.bt_prio_kill), + accum_tx->agg.bt_prio_kill, + delta_tx->agg.bt_prio_kill, + max_tx->agg.bt_prio_kill); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "agg rx_ba_rsp_cnt:", + le32_to_cpu(tx->agg.rx_ba_rsp_cnt), + accum_tx->agg.rx_ba_rsp_cnt, + delta_tx->agg.rx_ba_rsp_cnt, + max_tx->agg.rx_ba_rsp_cnt); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +ssize_t +iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char *buf; + int bufsz = sizeof(struct statistics_general) * 10 + 300; + ssize_t ret; + struct statistics_general_common *general, *accum_general; + struct statistics_general_common *delta_general, *max_general; + struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; + struct statistics_div *div, *accum_div, *delta_div, *max_div; + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + /* the statistic information display here is based on + * the last statistics notification from uCode + * might not reflect the current uCode activity + */ + general 
= &priv->_4965.statistics.general.common; + dbg = &priv->_4965.statistics.general.common.dbg; + div = &priv->_4965.statistics.general.common.div; + accum_general = &priv->_4965.accum_statistics.general.common; + accum_dbg = &priv->_4965.accum_statistics.general.common.dbg; + accum_div = &priv->_4965.accum_statistics.general.common.div; + delta_general = &priv->_4965.delta_statistics.general.common; + max_general = &priv->_4965.max_delta.general.common; + delta_dbg = &priv->_4965.delta_statistics.general.common.dbg; + max_dbg = &priv->_4965.max_delta.general.common.dbg; + delta_div = &priv->_4965.delta_statistics.general.common.div; + max_div = &priv->_4965.max_delta.general.common.div; + + pos += iwl4965_statistics_flag(priv, buf, bufsz); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_header, "Statistics_General:"); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "temperature:", + le32_to_cpu(general->temperature)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_value, "ttl_timestamp:", + le32_to_cpu(general->ttl_timestamp)); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_check:", + le32_to_cpu(dbg->burst_check), + accum_dbg->burst_check, + delta_dbg->burst_check, max_dbg->burst_check); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "burst_count:", + le32_to_cpu(dbg->burst_count), + accum_dbg->burst_count, + delta_dbg->burst_count, max_dbg->burst_count); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "wait_for_silence_timeout_count:", + le32_to_cpu(dbg->wait_for_silence_timeout_cnt), + accum_dbg->wait_for_silence_timeout_cnt, + delta_dbg->wait_for_silence_timeout_cnt, + max_dbg->wait_for_silence_timeout_cnt); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "sleep_time:", + le32_to_cpu(general->sleep_time), + accum_general->sleep_time, + delta_general->sleep_time, max_general->sleep_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_out:", + le32_to_cpu(general->slots_out), + accum_general->slots_out, + delta_general->slots_out, max_general->slots_out); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "slots_idle:", + le32_to_cpu(general->slots_idle), + accum_general->slots_idle, + delta_general->slots_idle, max_general->slots_idle); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_a:", + le32_to_cpu(div->tx_on_a), accum_div->tx_on_a, + delta_div->tx_on_a, max_div->tx_on_a); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "tx_on_b:", + le32_to_cpu(div->tx_on_b), accum_div->tx_on_b, + delta_div->tx_on_b, max_div->tx_on_b); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "exec_time:", + le32_to_cpu(div->exec_time), accum_div->exec_time, + delta_div->exec_time, max_div->exec_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "probe_time:", + le32_to_cpu(div->probe_time), accum_div->probe_time, + delta_div->probe_time, max_div->probe_time); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "rx_enable_counter:", + le32_to_cpu(general->rx_enable_counter), + accum_general->rx_enable_counter, + delta_general->rx_enable_counter, + max_general->rx_enable_counter); + pos += scnprintf(buf + pos, bufsz - pos, + fmt_table, "num_of_sos_states:", + le32_to_cpu(general->num_of_sos_states), + accum_general->num_of_sos_states, + delta_general->num_of_sos_states, + max_general->num_of_sos_states); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h 
b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h new file mode 100644 index 000000000000..6c8e35361a9e --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h @@ -0,0 +1,59 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +ssize_t iwl4965_ucode_general_stats_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos); +#else +static ssize_t +iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +static ssize_t +iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +static ssize_t +iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return 0; +} +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c new file mode 100644 index 000000000000..cb9baab1ff7d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c @@ -0,0 +1,154 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +#include <net/mac80211.h> + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" +#include "iwl-4965.h" +#include "iwl-io.h" + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +/* + * The device's EEPROM semaphore prevents conflicts between driver and uCode + * when accessing the EEPROM; each access is a series of pulses to/from the + * EEPROM chip, not a single event, so even reads could conflict if they + * weren't arbitrated by the semaphore. 
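+ *
+ * Acquisition below is retried up to EEPROM_SEM_RETRY_LIMIT times; each
+ * attempt polls the ownership bit for up to EEPROM_SEM_TIMEOUT before
+ * giving up, and the semaphore must be released again once the EEPROM
+ * access is finished.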
+ */ +int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv) +{ + u16 count; + int ret; + + for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { + /* Request semaphore */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + EEPROM_SEM_TIMEOUT); + if (ret >= 0) { + IWL_DEBUG_IO(priv, + "Acquired semaphore after %d tries.\n", + count+1); + return ret; + } + } + + return ret; +} + +void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv) +{ + iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); + +} + +int iwl4965_eeprom_check_version(struct iwl_priv *priv) +{ + u16 eeprom_ver; + u16 calib_ver; + + eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION); + calib_ver = iwl_legacy_eeprom_query16(priv, + EEPROM_4965_CALIB_VERSION_OFFSET); + + if (eeprom_ver < priv->cfg->eeprom_ver || + calib_ver < priv->cfg->eeprom_calib_ver) + goto err; + + IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", + eeprom_ver, calib_ver); + + return 0; +err: + IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " + "CALIB=0x%x < 0x%x\n", + eeprom_ver, priv->cfg->eeprom_ver, + calib_ver, priv->cfg->eeprom_calib_ver); + return -EINVAL; + +} + +void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) +{ + const u8 *addr = iwl_legacy_eeprom_query_addr(priv, + EEPROM_MAC_ADDRESS); + memcpy(mac, addr, ETH_ALEN); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h index 9166794eda0d..08b189c8472d 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h +++ b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h @@ -5,7 +5,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,7 +30,7 @@ * * BSD LICENSE * - * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -789,4 +789,26 @@ struct iwl4965_scd_bc_tbl { u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)]; } __packed; + +#define IWL4965_RTC_INST_LOWER_BOUND (0x000000) + +/* RSSI to dBm */ +#define IWL4965_RSSI_OFFSET 44 + +/* PCI registers */ +#define PCI_CFG_RETRY_TIMEOUT 0x041 + +/* PCI register values */ +#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01 +#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02 + +#define IWL4965_DEFAULT_TX_RETRY 15 + +/* Limit range of txpower output target to be between these values */ +#define IWL4965_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */ + +/* EEPROM */ +#define IWL4965_FIRST_AMPDU_QUEUE 10 + + #endif /* !__iwl_4965_hw_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c new file mode 100644 index 000000000000..26d324e30692 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.c @@ -0,0 +1,74 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-4965-led.h" + +/* Send led command */ +static int +iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_LEDS_CMD, + .len = sizeof(struct iwl_led_cmd), + .data = led_cmd, + .flags = CMD_ASYNC, + .callback = NULL, + }; + u32 reg; + + reg = iwl_read32(priv, CSR_LED_REG); + if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) + iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); + + return iwl_legacy_send_cmd(priv, &cmd); +} + +/* Set led register off */ +void iwl4965_led_enable(struct iwl_priv *priv) +{ + iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); +} + +const struct iwl_led_ops iwl4965_led_ops = { + .cmd = iwl4965_send_led_cmd, +}; diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h new file mode 100644 index 000000000000..5ed3615fc338 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-led.h @@ -0,0 +1,33 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_4965_led_h__ +#define __iwl_4965_led_h__ + +extern const struct iwl_led_ops iwl4965_led_ops; +void iwl4965_led_enable(struct iwl_priv *priv); + +#endif /* __iwl_4965_led_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c new file mode 100644 index 000000000000..5a8a3cce27bc --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c @@ -0,0 +1,1260 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" +#include "iwl-sta.h" + +void iwl4965_check_abort_status(struct iwl_priv *priv, + u8 frame_count, u32 status) +{ + if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) { + IWL_ERR(priv, "Tx flush command to flush out all frames\n"); + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->tx_flush); + } +} + +/* + * EEPROM + */ +struct iwl_mod_params iwl4965_mod_params = { + .amsdu_size_8K = 1, + .restart_fw = 1, + /* the rest are 0 by default */ +}; + +void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + unsigned long flags; + int i; + spin_lock_irqsave(&rxq->lock, flags); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { + /* In the reset function, these buffers may have been allocated + * to an SKB, so we need to unmap and free potential storage */ + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + } + + for (i = 0; i < RX_QUEUE_SIZE; i++) + rxq->queue[i] = NULL; + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual 
= 0; + rxq->free_count = 0; + spin_unlock_irqrestore(&rxq->lock, flags); +} + +int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + u32 rb_size; + const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ + u32 rb_timeout = 0; + + if (priv->cfg->mod_params->amsdu_size_8K) + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; + else + rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; + + /* Stop Rx DMA */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + + /* Reset driver's Rx queue write index */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); + + /* Tell device where to find RBD circular buffer in DRAM */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, + (u32)(rxq->bd_dma >> 8)); + + /* Tell device where in DRAM to update its Rx status */ + iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, + rxq->rb_stts_dma >> 4); + + /* Enable Rx DMA + * Direct rx interrupts to hosts + * Rx buffer size 4 or 8k + * RB timeout 0x10 + * 256 RBDs + */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | + FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | + FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK | + rb_size| + (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| + (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + return 0; +} + +static void iwl4965_set_pwr_vmain(struct iwl_priv *priv) +{ +/* + * (for documentation purposes) + * to set power to V_AUX, do: + + if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VAUX, + ~APMG_PS_CTRL_MSK_PWR_SRC); + */ + + iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, + ~APMG_PS_CTRL_MSK_PWR_SRC); +} + +int iwl4965_hw_nic_init(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_rx_queue *rxq = &priv->rxq; + int ret; + + /* nic_init */ + spin_lock_irqsave(&priv->lock, flags); + priv->cfg->ops->lib->apm_ops.init(priv); + + /* Set interrupt coalescing calibration timer to default (512 usecs) */ + iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); + + spin_unlock_irqrestore(&priv->lock, flags); + + iwl4965_set_pwr_vmain(priv); + + priv->cfg->ops->lib->apm_ops.config(priv); + + /* Allocate the RX queue, or reset if it is already allocated */ + if (!rxq->bd) { + ret = iwl_legacy_rx_queue_alloc(priv); + if (ret) { + IWL_ERR(priv, "Unable to initialize Rx queue\n"); + return -ENOMEM; + } + } else + iwl4965_rx_queue_reset(priv, rxq); + + iwl4965_rx_replenish(priv); + + iwl4965_rx_init(priv, rxq); + + spin_lock_irqsave(&priv->lock, flags); + + rxq->need_update = 1; + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Allocate or reset and init all Tx and Command queues */ + if (!priv->txq) { + ret = iwl4965_txq_ctx_alloc(priv); + if (ret) + return ret; + } else + iwl4965_txq_ctx_reset(priv); + + set_bit(STATUS_INIT, &priv->status); + + return 0; +} + +/** + * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr + */ +static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv, + dma_addr_t dma_addr) +{ + return cpu_to_le32((u32)(dma_addr >> 8)); +} + +/** + * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * 
and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. + */ +void iwl4965_rx_queue_restock(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + unsigned long flags; + + spin_lock_irqsave(&rxq->lock, flags); + while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + /* The overwritten rxb must be a used one */ + rxb = rxq->queue[rxq->write]; + BUG_ON(rxb && rxb->page); + + /* Get next free Rx buffer, remove from free list */ + element = rxq->rx_free.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + /* Point to Rx buffer via next RBD in circular buffer */ + rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, + rxb->page_dma); + rxq->queue[rxq->write] = rxb; + rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; + rxq->free_count--; + } + spin_unlock_irqrestore(&rxq->lock, flags); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + queue_work(priv->workqueue, &priv->rx_replenish); + + + /* If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock_irqsave(&rxq->lock, flags); + rxq->need_update = 1; + spin_unlock_irqrestore(&rxq->lock, flags); + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); + } +} + +/** + * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free + * + * When moving to rx_free an SKB is allocated for the slot. + * + * Also restock the Rx queue via iwl_rx_queue_restock. + * This is called as a scheduled work item (except for during initialization) + */ +static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct list_head *element; + struct iwl_rx_mem_buffer *rxb; + struct page *page; + unsigned long flags; + gfp_t gfp_mask = priority; + + while (1) { + spin_lock_irqsave(&rxq->lock, flags); + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + return; + } + spin_unlock_irqrestore(&rxq->lock, flags); + + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (priv->hw_params.rx_page_order > 0) + gfp_mask |= __GFP_COMP; + + /* Alloc a new receive buffer */ + page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(priv, "alloc_pages failed, " + "order: %d\n", + priv->hw_params.rx_page_order); + + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(priv, + "Failed to alloc_pages with %s. " + "Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? 
+ "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ + return; + } + + spin_lock_irqsave(&rxq->lock, flags); + + if (list_empty(&rxq->rx_used)) { + spin_unlock_irqrestore(&rxq->lock, flags); + __free_pages(page, priv->hw_params.rx_page_order); + return; + } + element = rxq->rx_used.next; + rxb = list_entry(element, struct iwl_rx_mem_buffer, list); + list_del(element); + + spin_unlock_irqrestore(&rxq->lock, flags); + + BUG_ON(rxb->page); + rxb->page = page; + /* Get physical address of the RB */ + rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + /* dma address must be no more than 36 bits */ + BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); + /* and also 256 byte aligned! */ + BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); + + spin_lock_irqsave(&rxq->lock, flags); + + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + priv->alloc_rxb_page++; + + spin_unlock_irqrestore(&rxq->lock, flags); + } +} + +void iwl4965_rx_replenish(struct iwl_priv *priv) +{ + unsigned long flags; + + iwl4965_rx_allocate(priv, GFP_KERNEL); + + spin_lock_irqsave(&priv->lock, flags); + iwl4965_rx_queue_restock(priv); + spin_unlock_irqrestore(&priv->lock, flags); +} + +void iwl4965_rx_replenish_now(struct iwl_priv *priv) +{ + iwl4965_rx_allocate(priv, GFP_ATOMIC); + + iwl4965_rx_queue_restock(priv); +} + +/* Assumes that the skb field of the buffers in 'pool' is kept accurate. + * If an SKB has been detached, the POOL needs to have its SKB set to NULL + * This free routine walks the list of POOL entries and if SKB is set to + * non NULL it is unmapped and freed + */ +void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) +{ + int i; + for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { + if (rxq->pool[i].page != NULL) { + pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); + rxq->pool[i].page = NULL; + } + } + + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); + dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->bd = NULL; + rxq->rb_stts = NULL; +} + +int iwl4965_rxq_stop(struct iwl_priv *priv) +{ + + /* stop Rx DMA */ + iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + + return 0; +} + +int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) +{ + int idx = 0; + int band_offset = 0; + + /* HT rate format: mac80211 wants an MCS number, which is just LSB */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + return idx; + /* Legacy rate format, search for match in table */ + } else { + if (band == IEEE80211_BAND_5GHZ) + band_offset = IWL_FIRST_OFDM_RATE; + for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++) + if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx - band_offset; + } + + return -1; +} + +static int iwl4965_calc_rssi(struct iwl_priv *priv, + struct iwl_rx_phy_res *rx_resp) +{ + /* data from PHY/DSP regarding signal strength, etc., + * contents are always there, not configurable by host. 
*/ + struct iwl4965_rx_non_cfg_phy *ncphy = + (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; + u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) + >> IWL49_AGC_DB_POS; + + u32 valid_antennae = + (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK) + >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; + u8 max_rssi = 0; + u32 i; + + /* Find max rssi among 3 possible receivers. + * These values are measured by the digital signal processor (DSP). + * They should stay fairly constant even as the signal strength varies, + * if the radio's automatic gain control (AGC) is working right. + * AGC value (see below) will provide the "interesting" info. */ + for (i = 0; i < 3; i++) + if (valid_antennae & (1 << i)) + max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); + + IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", + ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], + max_rssi, agc); + + /* dBm = max_rssi dB - agc dB - constant. + * Higher AGC (higher radio gain) means lower signal. */ + return max_rssi - agc - IWL4965_RSSI_OFFSET; +} + + +static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) +{ + u32 decrypt_out = 0; + + if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == + RX_RES_STATUS_STATION_FOUND) + decrypt_out |= (RX_RES_STATUS_STATION_FOUND | + RX_RES_STATUS_NO_STATION_INFO_MISMATCH); + + decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); + + /* packet was not encrypted */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_NONE) + return decrypt_out; + + /* packet was encrypted with unknown alg */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_ERR) + return decrypt_out; + + /* decryption was not done in HW */ + if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != + RX_MPDU_RES_STATUS_DEC_DONE_MSK) + return decrypt_out; + + switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { + + case RX_RES_STATUS_SEC_TYPE_CCMP: + /* alg is CCM: check MIC only */ + if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) + /* Bad MIC */ + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + + break; + + case RX_RES_STATUS_SEC_TYPE_TKIP: + if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { + /* Bad TTAK */ + decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; + break; + } + /* fall through if TTAK OK */ + default: + if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + break; + } + + IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", + decrypt_in, decrypt_out); + + return decrypt_out; +} + +static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u16 len, + u32 ampdu_status, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + /* In case of HW accelerated crypto and bad decryption, drop */ + if (!priv->cfg->mod_params->sw_crypto && + iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats)) + return; + + skb = dev_alloc_skb(128); + if (!skb) { + IWL_ERR(priv, "dev_alloc_skb failed\n"); + return; + } + + skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); + + iwl_legacy_update_stats(priv, false, fc, len); + 
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(priv->hw, skb); + priv->alloc_rxb_page--; + rxb->page = NULL; +} + +/* Called for REPLY_RX (legacy ABG frames), or + * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ +void iwl4965_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_phy_res *phy_res; + __le32 rx_pkt_status; + struct iwl_rx_mpdu_res_start *amsdu; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + + /** + * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. + * REPLY_RX: physical layer info is in this buffer + * REPLY_RX_MPDU_CMD: physical layer info was sent in separate + * command and cached in priv->last_phy_res + * + * Here we set up local variables depending on which command is + * received. + */ + if (pkt->hdr.cmd == REPLY_RX) { + phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt); + + len = le16_to_cpu(phy_res->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt + len); + ampdu_status = le32_to_cpu(rx_pkt_status); + } else { + if (!priv->_4965.last_phy_res_valid) { + IWL_ERR(priv, "MPDU frame without cached PHY data\n"); + return; + } + phy_res = &priv->_4965.last_phy_res; + amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); + len = le16_to_cpu(amsdu->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); + ampdu_status = iwl4965_translate_rx_status(priv, + le32_to_cpu(rx_pkt_status)); + } + + if ((unlikely(phy_res->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", + phy_res->cfg_phy_cnt); + return; + } + + if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || + !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(rx_pkt_status)); + return; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status.mactime = le64_to_cpu(phy_res->timestamp); + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), + rx_status.band); + rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.rate_idx = + iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); + rx_status.flag = 0; + + /* TSF isn't reliable. In order to allow smooth user experience, + * this W/A doesn't propagate it to the mac80211 */ + /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ + + priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + rx_status.signal = iwl4965_calc_rssi(priv, phy_res); + + iwl_legacy_dbg_log_rx_data_frame(priv, len, header); + IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", + rx_status.signal, (unsigned long long)rx_status.mactime); + + /* + * "antenna number" + * + * It seems that the antenna field in the phy flags value + * is actually a bit field. This is undefined by radiotap, + * it wants an actual antenna number but I always get "7" + * for most legacy frames I receive indicating that the + * same frame was received on all three RX chains. 
+ * + * I think this field should be removed in favor of a + * new 802.11n radiotap field "RX chains" that is defined + * as a bitmask. + */ + rx_status.antenna = + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + + /* set the preamble flag if appropriate */ + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + /* Set up the HT phy flags */ + if (rate_n_flags & RATE_MCS_HT_MSK) + rx_status.flag |= RX_FLAG_HT; + if (rate_n_flags & RATE_MCS_HT40_MSK) + rx_status.flag |= RX_FLAG_40MHZ; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status.flag |= RX_FLAG_SHORT_GI; + + iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status, + rxb, &rx_status); +} + +/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). + * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ +void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + priv->_4965.last_phy_res_valid = true; + memcpy(&priv->_4965.last_phy_res, pkt->u.raw, + sizeof(struct iwl_rx_phy_res)); +} + +static int iwl4965_get_single_channel_for_scan(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum ieee80211_band band, + struct iwl_scan_channel *scan_ch) +{ + const struct ieee80211_supported_band *sband; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added = 0; + u16 channel = 0; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) { + IWL_ERR(priv, "invalid band\n"); + return added; + } + + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + channel = iwl_legacy_get_single_channel_number(priv, band); + if (channel) { + scan_ch->channel = cpu_to_le16(channel); + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + added++; + } else + IWL_ERR(priv, "no valid channel found\n"); + return added; +} + +static int iwl4965_get_channels_for_scan(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum ieee80211_band band, + u8 is_active, u8 n_probes, + struct iwl_scan_channel *scan_ch) +{ + struct ieee80211_channel *chan; + const struct ieee80211_supported_band *sband; + const struct iwl_channel_info *ch_info; + u16 passive_dwell = 0; + u16 active_dwell = 0; + int added, i; + u16 channel; + + sband = iwl_get_hw_mode(priv, band); + if (!sband) + return 0; + + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); + + if (passive_dwell <= active_dwell) + passive_dwell = active_dwell + 1; + + for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { + chan = priv->scan_request->channels[i]; + + if (chan->band != band) + continue; + + channel = chan->hw_value; + scan_ch->channel = cpu_to_le16(channel); + + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN(priv, + "Channel %d is INVALID for this band.\n", + channel); + continue; + } + + if (!is_active || iwl_legacy_is_channel_passive(ch_info) || + 
(chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) + scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE; + else + scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; + + if (n_probes) + scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes); + + scan_ch->active_dwell = cpu_to_le16(active_dwell); + scan_ch->passive_dwell = cpu_to_le16(passive_dwell); + + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + if (band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + + IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n", + channel, le32_to_cpu(scan_ch->type), + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? + "ACTIVE" : "PASSIVE", + (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ? + active_dwell : passive_dwell); + + scan_ch++; + added++; + } + + IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); + return added; +} + +int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) +{ + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_CMD, + .len = sizeof(struct iwl_scan_cmd), + .flags = CMD_SIZE_HUGE, + }; + struct iwl_scan_cmd *scan; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u32 rate_flags = 0; + u16 cmd_len; + u16 rx_chain = 0; + enum ieee80211_band band; + u8 n_probes = 0; + u8 rx_ant = priv->hw_params.valid_rx_ant; + u8 rate; + bool is_active = false; + int chan_mod; + u8 active_chains; + u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; + int ret; + + lockdep_assert_held(&priv->mutex); + + if (vif) + ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + if (!priv->scan_cmd) { + priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) + + IWL_MAX_SCAN_SIZE, GFP_KERNEL); + if (!priv->scan_cmd) { + IWL_DEBUG_SCAN(priv, + "fail to allocate memory for scan\n"); + return -ENOMEM; + } + } + scan = priv->scan_cmd; + memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); + + scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; + scan->quiet_time = IWL_ACTIVE_QUIET_TIME; + + if (iwl_legacy_is_any_associated(priv)) { + u16 interval = 0; + u32 extra; + u32 suspend_time = 100; + u32 scan_suspend_time = 100; + + IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); + if (priv->is_internal_short_scan) + interval = 0; + else + interval = vif->bss_conf.beacon_int; + + scan->suspend_time = 0; + scan->max_out_time = cpu_to_le32(200 * 1024); + if (!interval) + interval = suspend_time; + + extra = (suspend_time / interval) << 22; + scan_suspend_time = (extra | + ((suspend_time % interval) * 1024)); + scan->suspend_time = cpu_to_le32(scan_suspend_time); + IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", + scan_suspend_time, interval); + } + + if (priv->is_internal_short_scan) { + IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); + } else if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!priv->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); + + scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; + 
scan->tx_cmd.sta_id = ctx->bcast_sta_id; + scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + + switch (priv->scan_band) { + case IEEE80211_BAND_2GHZ: + scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; + chan_mod = le32_to_cpu( + priv->contexts[IWL_RXON_CTX_BSS].active.flags & + RXON_FLG_CHANNEL_MODE_MSK) + >> RXON_FLG_CHANNEL_MODE_POS; + if (chan_mod == CHANNEL_MODE_PURE_40) { + rate = IWL_RATE_6M_PLCP; + } else { + rate = IWL_RATE_1M_PLCP; + rate_flags = RATE_MCS_CCK_MSK; + } + break; + case IEEE80211_BAND_5GHZ: + rate = IWL_RATE_6M_PLCP; + break; + default: + IWL_WARN(priv, "Invalid scan band\n"); + return -EIO; + } + + /* + * If active scanning is requested but a certain channel is + * marked passive, we can do active scanning if we detect + * transmissions. + * + * There is an issue with some firmware versions that triggers + * a sysassert on a "good CRC threshold" of zero (== disabled), + * on a radar channel even though this means that we should NOT + * send probes. + * + * The "good CRC threshold" is the number of frames that we + * need to receive during our dwell time on a channel before + * sending out probes -- setting this to a huge value will + * mean we never reach it, but at the same time work around + * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER + * here instead of IWL_GOOD_CRC_TH_DISABLED. + */ + scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : + IWL_GOOD_CRC_TH_NEVER; + + band = priv->scan_band; + + if (priv->cfg->scan_rx_antennas[band]) + rx_ant = priv->cfg->scan_rx_antennas[band]; + + if (priv->cfg->scan_tx_antennas[band]) + scan_tx_antennas = priv->cfg->scan_tx_antennas[band]; + + priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv, + priv->scan_tx_ant[band], + scan_tx_antennas); + rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]); + scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags); + + /* In power save mode use one chain, otherwise use all chains */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* rx_ant has been set to all valid chains previously */ + active_chains = rx_ant & + ((u8)(priv->chain_noise_data.active_chains)); + if (!active_chains) + active_chains = rx_ant; + + IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n", + priv->chain_noise_data.active_chains); + + rx_ant = iwl4965_first_antenna(active_chains); + } + + /* MIMO is not used here, but value is required */ + rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; + rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; + rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; + scan->rx_chain = cpu_to_le16(rx_chain); + if (!priv->is_internal_short_scan) { + cmd_len = iwl_legacy_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + vif->addr, + priv->scan_request->ie, + priv->scan_request->ie_len, + IWL_MAX_SCAN_SIZE - sizeof(*scan)); + } else { + /* use bcast addr, will not be transmitted but must be valid */ + cmd_len = iwl_legacy_fill_probe_req(priv, + (struct ieee80211_mgmt *)scan->data, + iwlegacy_bcast_addr, NULL, 0, + IWL_MAX_SCAN_SIZE - sizeof(*scan)); + + } + scan->tx_cmd.len = cpu_to_le16(cmd_len); + + scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | + RXON_FILTER_BCON_AWARE_MSK); + + if (priv->is_internal_short_scan) { + scan->channel_count = + iwl4965_get_single_channel_for_scan(priv, vif, band, + (void *)&scan->data[le16_to_cpu( + scan->tx_cmd.len)]); + } else { + scan->channel_count = + 
iwl4965_get_channels_for_scan(priv, vif, band, + is_active, n_probes, + (void *)&scan->data[le16_to_cpu( + scan->tx_cmd.len)]); + } + if (scan->channel_count == 0) { + IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); + return -EIO; + } + + cmd.len += le16_to_cpu(scan->tx_cmd.len) + + scan->channel_count * sizeof(struct iwl_scan_channel); + cmd.data = scan; + scan->len = cpu_to_le16(cmd.len); + + set_bit(STATUS_SCAN_HW, &priv->status); + + ret = iwl_legacy_send_cmd_sync(priv, &cmd); + if (ret) + clear_bit(STATUS_SCAN_HW, &priv->status); + + return ret; +} + +int iwl4965_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + if (add) + return iwl4965_add_bssid_station(priv, vif_priv->ctx, + vif->bss_conf.bssid, + &vif_priv->ibss_bssid_sta_id); + return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id, + vif->bss_conf.bssid); +} + +void iwl4965_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed) +{ + lockdep_assert_held(&priv->sta_lock); + + if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed) + priv->stations[sta_id].tid[tid].tfds_in_queue -= freed; + else { + IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n", + priv->stations[sta_id].tid[tid].tfds_in_queue, + freed); + priv->stations[sta_id].tid[tid].tfds_in_queue = 0; + } +} + +#define IWL_TX_QUEUE_MSK 0xfffff + +static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv) +{ + return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || + priv->current_ht_config.single_chain_sufficient; +} + +#define IWL_NUM_RX_CHAINS_MULTIPLE 3 +#define IWL_NUM_RX_CHAINS_SINGLE 2 +#define IWL_NUM_IDLE_CHAINS_DUAL 2 +#define IWL_NUM_IDLE_CHAINS_SINGLE 1 + +/* + * Determine how many receiver/antenna chains to use. + * + * More provides better reception via diversity. Fewer saves power + * at the expense of throughput, but only when not in powersave to + * start with. + * + * MIMO (dual stream) requires at least 2, but works better with 3. + * This does not determine *which* chains to use, just how many. + */ +static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv) +{ + /* # of Rx chains to use when expecting MIMO. */ + if (iwl4965_is_single_rx_stream(priv)) + return IWL_NUM_RX_CHAINS_SINGLE; + else + return IWL_NUM_RX_CHAINS_MULTIPLE; +} + +/* + * When we are in power saving mode, unless device support spatial + * multiplexing power save, use the active count for rx chain count. + */ +static int +iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) +{ + /* # Rx chains when idling, depending on SMPS mode */ + switch (priv->current_ht_config.smps) { + case IEEE80211_SMPS_STATIC: + case IEEE80211_SMPS_DYNAMIC: + return IWL_NUM_IDLE_CHAINS_SINGLE; + case IEEE80211_SMPS_OFF: + return active_cnt; + default: + WARN(1, "invalid SMPS mode %d", + priv->current_ht_config.smps); + return active_cnt; + } +} + +/* up to 4 chains */ +static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap) +{ + u8 res; + res = (chain_bitmap & BIT(0)) >> 0; + res += (chain_bitmap & BIT(1)) >> 1; + res += (chain_bitmap & BIT(2)) >> 2; + res += (chain_bitmap & BIT(3)) >> 3; + return res; +} + +/** + * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image + * + * Selects how many and which Rx receivers/antennas/chains to use. + * This should not be used for scan command ... it puts data in wrong place. 
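+ *
+ * The active and idle receiver counts chosen here are clamped to the
+ * number of chains reported as connected by chain-noise calibration
+ * (all valid antennas are assumed connected before the first
+ * association).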
+ */ +void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + bool is_single = iwl4965_is_single_rx_stream(priv); + bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); + u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; + u32 active_chains; + u16 rx_chain; + + /* Tell uCode which antennas are actually connected. + * Before first association, we assume all antennas are connected. + * Just after first association, iwl4965_chain_noise_calibration() + * checks which antennas actually *are* connected. */ + if (priv->chain_noise_data.active_chains) + active_chains = priv->chain_noise_data.active_chains; + else + active_chains = priv->hw_params.valid_rx_ant; + + rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS; + + /* How many receivers should we use? */ + active_rx_cnt = iwl4965_get_active_rx_chain_count(priv); + idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt); + + + /* correct rx chain count according hw settings + * and chain noise calibration + */ + valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains); + if (valid_rx_cnt < active_rx_cnt) + active_rx_cnt = valid_rx_cnt; + + if (valid_rx_cnt < idle_rx_cnt) + idle_rx_cnt = valid_rx_cnt; + + rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; + rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; + + ctx->staging.rx_chain = cpu_to_le16(rx_chain); + + if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) + ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK; + else + ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK; + + IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n", + ctx->staging.rx_chain, + active_rx_cnt, idle_rx_cnt); + + WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 || + active_rx_cnt < idle_rx_cnt); +} + +u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid) +{ + int i; + u8 ind = ant; + + for (i = 0; i < RATE_ANT_NUM - 1; i++) { + ind = (ind + 1) < RATE_ANT_NUM ? 
ind + 1 : 0; + if (valid & BIT(ind)) + return ind; + } + return ant; +} + +static const char *iwl4965_get_fh_string(int cmd) +{ + switch (cmd) { + IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); + IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); + IWL_CMD(FH_RSCSR_CHNL0_WPTR); + IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG); + IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG); + IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG); + IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV); + IWL_CMD(FH_TSSR_TX_STATUS_REG); + IWL_CMD(FH_TSSR_TX_ERROR_REG); + default: + return "UNKNOWN"; + } +} + +int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display) +{ + int i; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + int pos = 0; + size_t bufsz = 0; +#endif + static const u32 fh_tbl[] = { + FH_RSCSR_CHNL0_STTS_WPTR_REG, + FH_RSCSR_CHNL0_RBDCB_BASE_REG, + FH_RSCSR_CHNL0_WPTR, + FH_MEM_RCSR_CHNL0_CONFIG_REG, + FH_MEM_RSSR_SHARED_CTRL_REG, + FH_MEM_RSSR_RX_STATUS_REG, + FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV, + FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_ERROR_REG + }; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (display) { + bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + pos += scnprintf(*buf + pos, bufsz - pos, + "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + pos += scnprintf(*buf + pos, bufsz - pos, + " %34s: 0X%08x\n", + iwl4965_get_fh_string(fh_tbl[i]), + iwl_legacy_read_direct32(priv, fh_tbl[i])); + } + return pos; + } +#endif + IWL_ERR(priv, "FH register values:\n"); + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + IWL_ERR(priv, " %34s: 0X%08x\n", + iwl4965_get_fh_string(fh_tbl[i]), + iwl_legacy_read_direct32(priv, fh_tbl[i])); + } + return 0; +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c new file mode 100644 index 000000000000..31ac672b64e1 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c @@ -0,0 +1,2870 @@ +/****************************************************************************** + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/wireless.h> +#include <net/mac80211.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> + +#include <linux/workqueue.h> + +#include "iwl-dev.h" +#include "iwl-sta.h" +#include "iwl-core.h" +#include "iwl-4965.h" + +#define IWL4965_RS_NAME "iwl-4965-rs" + +#define NUM_TRY_BEFORE_ANT_TOGGLE 1 +#define IWL_NUMBER_TRY 1 +#define IWL_HT_NUMBER_TRY 3 + +#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */ +#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */ +#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */ + +/* max allowed rate miss before sync LQ cmd */ +#define IWL_MISSED_RATE_MAX 15 +/* max time to accum history 2 seconds */ +#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ) + +static u8 rs_ht_to_legacy[] = { + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX +}; + +static const u8 ant_toggle_lookup[] = { + /*ANT_NONE -> */ ANT_NONE, + /*ANT_A -> */ ANT_B, + /*ANT_B -> */ ANT_C, + /*ANT_AB -> */ ANT_BC, + /*ANT_C -> */ ANT_A, + /*ANT_AC -> */ ANT_AB, + /*ANT_BC -> */ ANT_AC, + /*ANT_ABC -> */ ANT_ABC, +}; + +#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ + [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ + IWL_RATE_SISO_##s##M_PLCP, \ + IWL_RATE_MIMO2_##s##M_PLCP,\ + IWL_RATE_##r##M_IEEE, \ + IWL_RATE_##ip##M_INDEX, \ + IWL_RATE_##in##M_INDEX, \ + IWL_RATE_##rp##M_INDEX, \ + IWL_RATE_##rn##M_INDEX, \ + IWL_RATE_##pp##M_INDEX, \ + IWL_RATE_##np##M_INDEX } + +/* + * Parameter order: + * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate + * + * If there isn't a valid next or previous rate then INV is used which + * maps to IWL_RATE_INVALID + * + */ +const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = { + IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */ + IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */ + IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */ + IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */ + IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */ + IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */ + IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */ + IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */ + IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */ + IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */ + IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */ + IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */ + IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ +}; + +static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags) +{ + int idx = 0; + + /* HT rate format */ + if (rate_n_flags & RATE_MCS_HT_MSK) { + idx = (rate_n_flags & 0xff); + + if (idx >= IWL_RATE_MIMO2_6M_PLCP) + idx = idx - IWL_RATE_MIMO2_6M_PLCP; + + idx += IWL_FIRST_OFDM_RATE; + /* skip 9M not supported in ht*/ + if (idx >= IWL_RATE_9M_INDEX) + idx += 1; + if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE)) + return idx; + + /* legacy rate 
format, search for match in table */ + } else { + for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++) + if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF)) + return idx; + } + + return -1; +} + +static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta); +static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 rate_n_flags); +static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, + bool force_search); + +#ifdef CONFIG_MAC80211_DEBUGFS +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index); +#else +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{} +#endif + +/** + * The following tables contain the expected throughput metrics for all rates + * + * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits + * + * where invalid entries are zeros. + * + * CCK rates are only valid in legacy table and will only be used in G + * (2.4 GHz) band. + */ + +static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { + 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0 +}; + +static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ + {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ + {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ + {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ +}; + +static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ + {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ + {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ + {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ +}; + +static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ + {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ + {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ + {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/ +}; + +static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ + {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ + {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ + {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ +}; + +/* mbps, mcs */ +static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { + { "1", "BPSK DSSS"}, + { "2", "QPSK DSSS"}, + {"5.5", "BPSK CCK"}, + { "11", "QPSK CCK"}, + { "6", "BPSK 1/2"}, + { "9", "BPSK 1/2"}, + { "12", "QPSK 1/2"}, + { "18", "QPSK 3/4"}, + { "24", "16QAM 1/2"}, + { "36", "16QAM 3/4"}, + { "48", "64QAM 2/3"}, + { "54", "64QAM 3/4"}, + { "60", "64QAM 5/6"}, +}; + +#define MCS_INDEX_PER_STREAM (8) + +static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags) +{ + return (u8)(rate_n_flags & 0xFF); +} + +static void +iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window) +{ + window->data = 0; + window->success_counter = 0; + window->success_ratio = IWL_INVALID_VALUE; + window->counter = 0; + window->average_tpt = IWL_INVALID_VALUE; + window->stamp = 0; +} + +static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type) +{ + return (ant_type & valid_antenna) == ant_type; +} + +/* + * removes the old data from the 
statistics. All data that is older than + * TID_MAX_TIME_DIFF, will be deleted. + */ +static void +iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time) +{ + /* The oldest age we want to keep */ + u32 oldest_time = curr_time - TID_MAX_TIME_DIFF; + + while (tl->queue_count && + (tl->time_stamp < oldest_time)) { + tl->total -= tl->packet_count[tl->head]; + tl->packet_count[tl->head] = 0; + tl->time_stamp += TID_QUEUE_CELL_SPACING; + tl->queue_count--; + tl->head++; + if (tl->head >= TID_QUEUE_MAX_SIZE) + tl->head = 0; + } +} + +/* + * increment traffic load value for tid and also remove + * any old values if passed the certain time period + */ +static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data, + struct ieee80211_hdr *hdr) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + u8 tid; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } else + return MAX_TID_COUNT; + + if (unlikely(tid >= TID_MAX_LOAD_COUNT)) + return MAX_TID_COUNT; + + tl = &lq_data->load[tid]; + + curr_time -= curr_time % TID_ROUND_VALUE; + + /* Happens only for the first packet. Initialize the data */ + if (!(tl->queue_count)) { + tl->total = 1; + tl->time_stamp = curr_time; + tl->queue_count = 1; + tl->head = 0; + tl->packet_count[0] = 1; + return MAX_TID_COUNT; + } + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + iwl4965_rs_tl_rm_old_stats(tl, curr_time); + + index = (tl->head + index) % TID_QUEUE_MAX_SIZE; + tl->packet_count[index] = tl->packet_count[index] + 1; + tl->total = tl->total + 1; + + if ((index + 1) > tl->queue_count) + tl->queue_count = index + 1; + + return tid; +} + +/* + get the traffic load value for tid +*/ +static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid) +{ + u32 curr_time = jiffies_to_msecs(jiffies); + u32 time_diff; + s32 index; + struct iwl_traffic_load *tl = NULL; + + if (tid >= TID_MAX_LOAD_COUNT) + return 0; + + tl = &(lq_data->load[tid]); + + curr_time -= curr_time % TID_ROUND_VALUE; + + if (!(tl->queue_count)) + return 0; + + time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); + index = time_diff / TID_QUEUE_CELL_SPACING; + + /* The history is too long: remove data that is older than */ + /* TID_MAX_TIME_DIFF */ + if (index >= TID_QUEUE_MAX_SIZE) + iwl4965_rs_tl_rm_old_stats(tl, curr_time); + + return tl->total; +} + +static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, + struct iwl_lq_sta *lq_data, u8 tid, + struct ieee80211_sta *sta) +{ + int ret = -EAGAIN; + u32 load; + + load = iwl4965_rs_tl_get_load(lq_data, tid); + + if (load > IWL_AGG_LOAD_THRESHOLD) { + IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", + sta->addr, tid); + ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + if (ret == -EAGAIN) { + /* + * driver and mac80211 is out of sync + * this might be cause by reloading firmware + * stop the tx ba session here + */ + IWL_ERR(priv, "Fail start Tx agg on tid: %d\n", + tid); + ieee80211_stop_tx_ba_session(sta, tid); + } + } else { + IWL_ERR(priv, "Aggregation not enabled for tid %d " + "because load = %u\n", tid, load); + } + return ret; +} + +static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, + struct iwl_lq_sta *lq_data, + struct ieee80211_sta *sta) +{ + if (tid < 
TID_MAX_LOAD_COUNT) + iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); + else + IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", + tid, TID_MAX_LOAD_COUNT); +} + +static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags) +{ + return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_B_MSK) + + !!(rate_n_flags & RATE_MCS_ANT_C_MSK); +} + +/* + * Static function to get the expected throughput from an iwl_scale_tbl_info + * that wraps a NULL pointer check + */ +static s32 +iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index) +{ + if (tbl->expected_tpt) + return tbl->expected_tpt[rs_index]; + return 0; +} + +/** + * iwl4965_rs_collect_tx_data - Update the success/failure sliding window + * + * We keep a sliding window of the last 62 packets transmitted + * at this rate. window->data contains the bitmask of successful + * packets. + */ +static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes) +{ + struct iwl_rate_scale_data *window = NULL; + static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1)); + s32 fail_count, tpt; + + if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) + return -EINVAL; + + /* Select window for current tx bit rate */ + window = &(tbl->win[scale_index]); + + /* Get expected throughput */ + tpt = iwl4965_get_expected_tpt(tbl, scale_index); + + /* + * Keep track of only the latest 62 tx frame attempts in this rate's + * history window; anything older isn't really relevant any more. + * If we have filled up the sliding window, drop the oldest attempt; + * if the oldest attempt (highest bit in bitmap) shows "success", + * subtract "1" from the success counter (this is the main reason + * we keep these bitmaps!). + */ + while (attempts > 0) { + if (window->counter >= IWL_RATE_MAX_WINDOW) { + + /* remove earliest */ + window->counter = IWL_RATE_MAX_WINDOW - 1; + + if (window->data & mask) { + window->data &= ~mask; + window->success_counter--; + } + } + + /* Increment frames-attempted counter */ + window->counter++; + + /* Shift bitmap by one frame to throw away oldest history */ + window->data <<= 1; + + /* Mark the most recent #successes attempts as successful */ + if (successes > 0) { + window->success_counter++; + window->data |= 0x1; + successes--; + } + + attempts--; + } + + /* Calculate current success ratio, avoid divide-by-0! */ + if (window->counter > 0) + window->success_ratio = 128 * (100 * window->success_counter) + / window->counter; + else + window->success_ratio = IWL_INVALID_VALUE; + + fail_count = window->counter - window->success_counter; + + /* Calculate average throughput, if we have enough history. */ + if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) || + (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH)) + window->average_tpt = (window->success_ratio * tpt + 64) / 128; + else + window->average_tpt = IWL_INVALID_VALUE; + + /* Tag this window as having been updated */ + window->stamp = jiffies; + + return 0; +} + +/* + * Fill uCode API rate_n_flags field, based on "search" or "active" table. 
+ */ +static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv, + struct iwl_scale_tbl_info *tbl, + int index, u8 use_green) +{ + u32 rate_n_flags = 0; + + if (is_legacy(tbl->lq_type)) { + rate_n_flags = iwlegacy_rates[index].plcp; + if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE) + rate_n_flags |= RATE_MCS_CCK_MSK; + + } else if (is_Ht(tbl->lq_type)) { + if (index > IWL_LAST_OFDM_RATE) { + IWL_ERR(priv, "Invalid HT rate index %d\n", index); + index = IWL_LAST_OFDM_RATE; + } + rate_n_flags = RATE_MCS_HT_MSK; + + if (is_siso(tbl->lq_type)) + rate_n_flags |= iwlegacy_rates[index].plcp_siso; + else + rate_n_flags |= iwlegacy_rates[index].plcp_mimo2; + } else { + IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type); + } + + rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) & + RATE_MCS_ANT_ABC_MSK); + + if (is_Ht(tbl->lq_type)) { + if (tbl->is_ht40) { + if (tbl->is_dup) + rate_n_flags |= RATE_MCS_DUP_MSK; + else + rate_n_flags |= RATE_MCS_HT40_MSK; + } + if (tbl->is_SGI) + rate_n_flags |= RATE_MCS_SGI_MSK; + + if (use_green) { + rate_n_flags |= RATE_MCS_GF_MSK; + if (is_siso(tbl->lq_type) && tbl->is_SGI) { + rate_n_flags &= ~RATE_MCS_SGI_MSK; + IWL_ERR(priv, "GF was set with SGI:SISO\n"); + } + } + } + return rate_n_flags; +} + +/* + * Interpret uCode API's rate_n_flags format, + * fill "search" or "active" tx mode table. + */ +static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags, + enum ieee80211_band band, + struct iwl_scale_tbl_info *tbl, + int *rate_idx) +{ + u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK); + u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags); + u8 mcs; + + memset(tbl, 0, sizeof(struct iwl_scale_tbl_info)); + *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags); + + if (*rate_idx == IWL_RATE_INVALID) { + *rate_idx = -1; + return -EINVAL; + } + tbl->is_SGI = 0; /* default legacy setup */ + tbl->is_ht40 = 0; + tbl->is_dup = 0; + tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS); + tbl->lq_type = LQ_NONE; + tbl->max_search = IWL_MAX_SEARCH; + + /* legacy rate format */ + if (!(rate_n_flags & RATE_MCS_HT_MSK)) { + if (iwl4965_num_of_ant == 1) { + if (band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + } + /* HT rate format */ + } else { + if (rate_n_flags & RATE_MCS_SGI_MSK) + tbl->is_SGI = 1; + + if ((rate_n_flags & RATE_MCS_HT40_MSK) || + (rate_n_flags & RATE_MCS_DUP_MSK)) + tbl->is_ht40 = 1; + + if (rate_n_flags & RATE_MCS_DUP_MSK) + tbl->is_dup = 1; + + mcs = iwl4965_rs_extract_rate(rate_n_flags); + + /* SISO */ + if (mcs <= IWL_RATE_SISO_60M_PLCP) { + if (iwl4965_num_of_ant == 1) + tbl->lq_type = LQ_SISO; /*else NONE*/ + /* MIMO2 */ + } else { + if (iwl4965_num_of_ant == 2) + tbl->lq_type = LQ_MIMO2; + } + } + return 0; +} + +/* switch to another antenna/antennas and return 1 */ +/* if no other valid antenna found, return 0 */ +static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, + struct iwl_scale_tbl_info *tbl) +{ + u8 new_ant_type; + + if (!tbl->ant_type || tbl->ant_type > ANT_ABC) + return 0; + + if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type)) + return 0; + + new_ant_type = ant_toggle_lookup[tbl->ant_type]; + + while ((new_ant_type != tbl->ant_type) && + !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type)) + new_ant_type = ant_toggle_lookup[new_ant_type]; + + if (new_ant_type == tbl->ant_type) + return 0; + + tbl->ant_type = new_ant_type; + *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK; + *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS; + return 1; 
+} + +/** + * Green-field mode is valid if the station supports it and + * there are no non-GF stations present in the BSS. + */ +static bool iwl4965_rs_use_green(struct ieee80211_sta *sta) +{ + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && + !(ctx->ht.non_gf_sta_present); +} + +/** + * iwl4965_rs_get_supported_rates - get the available rates + * + * if management frame or broadcast frame only return + * basic available rates. + * + */ +static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta, + struct ieee80211_hdr *hdr, + enum iwl_table_type rate_type) +{ + if (is_legacy(rate_type)) { + return lq_sta->active_legacy_rate; + } else { + if (is_siso(rate_type)) + return lq_sta->active_siso_rate; + else + return lq_sta->active_mimo2_rate; + } +} + +static u16 +iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask, + int rate_type) +{ + u8 high = IWL_RATE_INVALID; + u8 low = IWL_RATE_INVALID; + + /* 802.11A or ht walks to the next literal adjacent rate in + * the rate table */ + if (is_a_band(rate_type) || !is_legacy(rate_type)) { + int i; + u32 mask; + + /* Find the previous rate that is in the rate mask */ + i = index - 1; + for (mask = (1 << i); i >= 0; i--, mask >>= 1) { + if (rate_mask & mask) { + low = i; + break; + } + } + + /* Find the next rate that is in the rate mask */ + i = index + 1; + for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) { + if (rate_mask & mask) { + high = i; + break; + } + } + + return (high << 8) | low; + } + + low = index; + while (low != IWL_RATE_INVALID) { + low = iwlegacy_rates[low].prev_rs; + if (low == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << low)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low); + } + + high = index; + while (high != IWL_RATE_INVALID) { + high = iwlegacy_rates[high].next_rs; + if (high == IWL_RATE_INVALID) + break; + if (rate_mask & (1 << high)) + break; + IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high); + } + + return (high << 8) | low; +} + +static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + u8 scale_index, u8 ht_possible) +{ + s32 low; + u16 rate_mask; + u16 high_low; + u8 switch_to_legacy = 0; + u8 is_green = lq_sta->is_green; + struct iwl_priv *priv = lq_sta->drv; + + /* check if we need to switch from HT to legacy rates. 
+ * assumption is that mandatory rates (1Mbps or 6Mbps) + * are always supported (spec demand) */ + if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { + switch_to_legacy = 1; + scale_index = rs_ht_to_legacy[scale_index]; + if (lq_sta->band == IEEE80211_BAND_5GHZ) + tbl->lq_type = LQ_A; + else + tbl->lq_type = LQ_G; + + if (iwl4965_num_of_ant(tbl->ant_type) > 1) + tbl->ant_type = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + + tbl->is_ht40 = 0; + tbl->is_SGI = 0; + tbl->max_search = IWL_MAX_SEARCH; + } + + rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type); + + /* Mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + /* supp_rates has no CCK bits in A mode */ + if (lq_sta->band == IEEE80211_BAND_5GHZ) + rate_mask = (u16)(rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_mask = (u16)(rate_mask & lq_sta->supp_rates); + } + + /* If we switched from HT to legacy, check current rate */ + if (switch_to_legacy && (rate_mask & (1 << scale_index))) { + low = scale_index; + goto out; + } + + high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv, + scale_index, rate_mask, + tbl->lq_type); + low = high_low & 0xff; + + if (low == IWL_RATE_INVALID) + low = scale_index; + +out: + return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green); +} + +/* + * Simple function to compare two rate scale table types + */ +static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a, + struct iwl_scale_tbl_info *b) +{ + return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) && + (a->is_SGI == b->is_SGI); +} + +/* + * mac80211 sends us Tx status + */ +static void +iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta, + struct sk_buff *skb) +{ + int legacy_success; + int retries; + int rs_index, mac_index, i; + struct iwl_lq_sta *lq_sta = priv_sta; + struct iwl_link_quality_cmd *table; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct iwl_priv *priv = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + enum mac80211_rate_control_flags mac_flags; + u32 tx_rate; + struct iwl_scale_tbl_info tbl_type; + struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + IWL_DEBUG_RATE_LIMIT(priv, + "get frame ack response, update rate scale window\n"); + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (!lq_sta) { + IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n"); + return; + } else if (!lq_sta->drv) { + IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); + return; + } + + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + /* This packet was aggregated but doesn't carry status info */ + if ((info->flags & IEEE80211_TX_CTL_AMPDU) && + !(info->flags & IEEE80211_TX_STAT_AMPDU)) + return; + + /* + * Ignore this Tx frame response if its initial rate doesn't match + * that of latest Link Quality command. There may be stragglers + * from a previous Link Quality command, but we're no longer interested + * in those; they're either from the "active" mode while we're trying + * to check "search" mode, or a prior "search" mode after we've moved + * to a new "search" mode (which might become the new "active" mode). 
+ */ + table = &lq_sta->lq; + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, + priv->band, &tbl_type, &rs_index); + if (priv->band == IEEE80211_BAND_5GHZ) + rs_index -= IWL_FIRST_OFDM_RATE; + mac_flags = info->status.rates[0].flags; + mac_index = info->status.rates[0].idx; + /* For HT packets, map MCS to PLCP */ + if (mac_flags & IEEE80211_TX_RC_MCS) { + mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */ + if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE)) + mac_index++; + /* + * mac80211 HT index is always zero-indexed; we need to move + * HT OFDM rates after CCK rates in 2.4 GHz band + */ + if (priv->band == IEEE80211_BAND_2GHZ) + mac_index += IWL_FIRST_OFDM_RATE; + } + /* Here we actually compare this rate to the latest LQ command */ + if ((mac_index < 0) || + (tbl_type.is_SGI != + !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || + (tbl_type.is_ht40 != + !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || + (tbl_type.is_dup != + !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || + (tbl_type.ant_type != info->antenna_sel_tx) || + (!!(tx_rate & RATE_MCS_HT_MSK) != + !!(mac_flags & IEEE80211_TX_RC_MCS)) || + (!!(tx_rate & RATE_MCS_GF_MSK) != + !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || + (rs_index != mac_index)) { + IWL_DEBUG_RATE(priv, + "initial rate %d does not match %d (0x%x)\n", + mac_index, rs_index, tx_rate); + /* + * Since rates mis-match, the last LQ command may have failed. + * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with + * ... driver. + */ + lq_sta->missed_rate_counter++; + if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { + lq_sta->missed_rate_counter = 0; + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, + CMD_ASYNC, false); + } + /* Regardless, ignore this status info for outdated rate */ + return; + } else + /* Rate did match, so reset the missed_rate_counter */ + lq_sta->missed_rate_counter = 0; + + /* Figure out if rate scale algorithm is in active or search table */ + if (iwl4965_table_type_matches(&tbl_type, + &(lq_sta->lq_info[lq_sta->active_tbl]))) { + curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + } else if (iwl4965_table_type_matches(&tbl_type, + &lq_sta->lq_info[1 - lq_sta->active_tbl])) { + curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + } else { + IWL_DEBUG_RATE(priv, + "Neither active nor search matches tx rate\n"); + tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); + tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]); + IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n", + tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI); + IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n", + tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI); + /* + * no matching table found, let's by-pass the data collection + * and continue to perform rate scale to find the rate table + */ + iwl4965_rs_stay_in_table(lq_sta, true); + goto done; + } + + /* + * Updating the frame history depends on whether packets were + * aggregated. + * + * For aggregation, all packets were transmitted at the same rate, the + * first index into rate scale table. 
+ */ + if (info->flags & IEEE80211_TX_STAT_AMPDU) { + tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, + &rs_index); + iwl4965_rs_collect_tx_data(curr_tbl, rs_index, + info->status.ampdu_len, + info->status.ampdu_ack_len); + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += info->status.ampdu_ack_len; + lq_sta->total_failed += (info->status.ampdu_len - + info->status.ampdu_ack_len); + } + } else { + /* + * For legacy, update frame history with for each Tx retry. + */ + retries = info->status.rates[0].count - 1; + /* HW doesn't send more than 15 retries */ + retries = min(retries, 15); + + /* The last transmission may have been successful */ + legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK); + /* Collect data for each rate used during failed TX attempts */ + for (i = 0; i <= retries; ++i) { + tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags); + iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, + &tbl_type, &rs_index); + /* + * Only collect stats if retried rate is in the same RS + * table as active/search. + */ + if (iwl4965_table_type_matches(&tbl_type, curr_tbl)) + tmp_tbl = curr_tbl; + else if (iwl4965_table_type_matches(&tbl_type, + other_tbl)) + tmp_tbl = other_tbl; + else + continue; + iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1, + i < retries ? 0 : legacy_success); + } + + /* Update success/fail counts if not searching for new mode */ + if (lq_sta->stay_in_tbl) { + lq_sta->total_success += legacy_success; + lq_sta->total_failed += retries + (1 - legacy_success); + } + } + /* The last TX rate is cached in lq_sta; it's set in if/else above */ + lq_sta->last_rate_n_flags = tx_rate; +done: + /* See if there's a better rate or modulation mode to try. */ + if (sta && sta->supp_rates[sband->band]) + iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta); +} + +/* + * Begin a period of staying with a selected modulation mode. + * Set "stay_in_tbl" flag to prevent any mode switches. + * Set frame tx success limits according to legacy vs. high-throughput, + * and reset overall (spanning all rates) tx success history statistics. + * These control how long we stay using same modulation mode before + * searching for a new mode. 
+ */ +static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy, + struct iwl_lq_sta *lq_sta) +{ + IWL_DEBUG_RATE(priv, "we are staying in the same table\n"); + lq_sta->stay_in_tbl = 1; /* only place this gets set */ + if (is_legacy) { + lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT; + } else { + lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT; + lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT; + lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT; + } + lq_sta->table_count = 0; + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = jiffies; + lq_sta->action_counter = 0; +} + +/* + * Find correct throughput table for given mode of modulation + */ +static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) +{ + /* Used to choose among HT tables */ + s32 (*ht_tbl_pointer)[IWL_RATE_COUNT]; + + /* Check for invalid LQ type */ + if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Legacy rates have only one table */ + if (is_legacy(tbl->lq_type)) { + tbl->expected_tpt = expected_tpt_legacy; + return; + } + + /* Choose among many HT tables depending on number of streams + * (SISO/MIMO2), channel width (20/40), SGI, and aggregation + * status */ + if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_siso20MHz; + else if (is_siso(tbl->lq_type)) + ht_tbl_pointer = expected_tpt_siso40MHz; + else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) + ht_tbl_pointer = expected_tpt_mimo2_20MHz; + else /* if (is_mimo2(tbl->lq_type)) <-- must be true */ + ht_tbl_pointer = expected_tpt_mimo2_40MHz; + + if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */ + tbl->expected_tpt = ht_tbl_pointer[0]; + else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */ + tbl->expected_tpt = ht_tbl_pointer[1]; + else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */ + tbl->expected_tpt = ht_tbl_pointer[2]; + else /* AGG+SGI */ + tbl->expected_tpt = ht_tbl_pointer[3]; +} + +/* + * Find starting rate for new "search" high-throughput mode of modulation. + * Goal is to find lowest expected rate (under perfect conditions) that is + * above the current measured throughput of "active" mode, to give new mode + * a fair chance to prove itself without too many challenges. + * + * This gets called when transitioning to more aggressive modulation + * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive + * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need + * to decrease to match "active" throughput. When moving from MIMO to SISO, + * bit rate will typically need to increase, but not if performance was bad. 
+ */ +static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, /* "search" */ + u16 rate_mask, s8 index) +{ + /* "active" values */ + struct iwl_scale_tbl_info *active_tbl = + &(lq_sta->lq_info[lq_sta->active_tbl]); + s32 active_sr = active_tbl->win[index].success_ratio; + s32 active_tpt = active_tbl->expected_tpt[index]; + + /* expected "search" throughput */ + s32 *tpt_tbl = tbl->expected_tpt; + + s32 new_rate, high, low, start_hi; + u16 high_low; + s8 rate = index; + + new_rate = high = low = start_hi = IWL_RATE_INVALID; + + for (; ;) { + high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask, + tbl->lq_type); + + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* + * Lower the "search" bit rate, to give new "search" mode + * approximately the same throughput as "active" if: + * + * 1) "Active" mode has been working modestly well (but not + * great), and expected "search" throughput (under perfect + * conditions) at candidate rate is above the actual + * measured "active" throughput (but less than expected + * "active" throughput under perfect conditions). + * OR + * 2) "Active" mode has been working perfectly or very well + * and expected "search" throughput (under perfect + * conditions) at candidate rate is above expected + * "active" throughput (under perfect conditions). + */ + if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) && + ((active_sr > IWL_RATE_DECREASE_TH) && + (active_sr <= IWL_RATE_HIGH_TH) && + (tpt_tbl[rate] <= active_tpt))) || + ((active_sr >= IWL_RATE_SCALE_SWITCH) && + (tpt_tbl[rate] > active_tpt))) { + + /* (2nd or later pass) + * If we've already tried to raise the rate, and are + * now trying to lower it, use the higher rate. */ + if (start_hi != IWL_RATE_INVALID) { + new_rate = start_hi; + break; + } + + new_rate = rate; + + /* Loop again with lower rate */ + if (low != IWL_RATE_INVALID) + rate = low; + + /* Lower rate not available, use the original */ + else + break; + + /* Else try to raise the "search" rate to match "active" */ + } else { + /* (2nd or later pass) + * If we've already tried to lower the rate, and are + * now trying to raise it, use the lower rate. 
*/ + if (new_rate != IWL_RATE_INVALID) + break; + + /* Loop again with higher rate */ + else if (high != IWL_RATE_INVALID) { + start_hi = high; + rate = high; + + /* Higher rate not available, use the original */ + } else { + new_rate = rate; + break; + } + } + } + + return new_rate; +} + +/* + * Set up search table for MIMO2 + */ +static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + s32 rate; + s8 is_green = lq_sta->is_green; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) + == WLAN_HT_CAP_SM_PS_STATIC) + return -1; + + /* Need both Tx chains/antennas to support MIMO */ + if (priv->hw_params.tx_chains_num < 2) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); + + tbl->lq_type = LQ_MIMO2; + tbl->is_dup = lq_sta->is_dup; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_mimo2_rate; + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + + rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", + rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, + "Can't switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, + tbl, rate, is_green); + + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Set up search table for SISO + */ +static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, int index) +{ + u16 rate_mask; + u8 is_green = lq_sta->is_green; + s32 rate; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported) + return -1; + + IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n"); + + tbl->is_dup = lq_sta->is_dup; + tbl->lq_type = LQ_SISO; + tbl->action = 0; + tbl->max_search = IWL_MAX_SEARCH; + rate_mask = lq_sta->active_siso_rate; + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + tbl->is_ht40 = 1; + else + tbl->is_ht40 = 0; + + if (is_green) + tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/ + + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index); + + IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask); + if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) { + IWL_DEBUG_RATE(priv, + "can not switch with index %d rate mask %x\n", + rate, rate_mask); + return -1; + } + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, + tbl, rate, is_green); + IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n", + tbl->current_rate, is_green); + return 0; +} + +/* + * Try to switch to new modulation mode from legacy + */ +static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + 
struct ieee80211_sta *sta, + int index) +{ + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + int ret = 0; + u8 update_search_tbl_counter = 0; + + tbl->action = IWL_LEGACY_SWITCH_SISO; + + start_action = tbl->action; + for (; ;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_LEGACY_SWITCH_ANTENNA1: + case IWL_LEGACY_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n"); + + if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + /* Don't change antenna if success has been great */ + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + /* Set up search table to try other antenna */ + memcpy(search_tbl, tbl, sz); + + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + iwl4965_rs_set_expected_tpt_table(lq_sta, + search_tbl); + goto out; + } + break; + case IWL_LEGACY_SWITCH_SISO: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n"); + + /* Set up search table to try SISO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + + break; + case IWL_LEGACY_SWITCH_MIMO2_AB: + case IWL_LEGACY_SWITCH_MIMO2_AC: + case IWL_LEGACY_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n"); + + /* Set up search table to try MIMO */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) { + lq_sta->action_counter = 0; + goto out; + } + break; + } + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + + } + search_tbl->lq_type = LQ_NONE; + return 0; + +out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC) + tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + return 0; + +} + +/* + * Try to switch to new modulation mode from SISO + */ +static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + u8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = 
priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_SISO_SWITCH_ANTENNA1: + case IWL_SISO_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n"); + if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 && + tx_chains_num <= 1) || + (tbl->action == IWL_SISO_SWITCH_ANTENNA2 && + tx_chains_num <= 2)) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_SISO_SWITCH_MIMO2_AB: + case IWL_SISO_SWITCH_MIMO2_AC: + case IWL_SISO_SWITCH_MIMO2_BC: + IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n"); + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = 0; + + if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB) + search_tbl->ant_type = ANT_AB; + else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC) + search_tbl->ant_type = ANT_AC; + else + search_tbl->ant_type = ANT_BC; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) + goto out; + break; + case IWL_SISO_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n"); + + memcpy(search_tbl, tbl, sz); + if (is_green) { + if (!tbl->is_SGI) + break; + else + IWL_ERR(priv, + "SGI was set in GF+SISO\n"); + } + search_tbl->is_SGI = !tbl->is_SGI; + iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl); + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + iwl4965_rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + } + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_SISO_SWITCH_GI) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; +} + +/* + * Try to switch to new modulation mode from MIMO2 + */ +static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, int index) +{ + s8 is_green = lq_sta->is_green; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + struct iwl_scale_tbl_info *search_tbl = + &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + struct iwl_rate_scale_data *window = &(tbl->win[index]); + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + u32 sz = (sizeof(struct iwl_scale_tbl_info) - + (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); + u8 start_action; + u8 valid_tx_ant = priv->hw_params.valid_tx_ant; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + u8 update_search_tbl_counter = 0; + int ret; + + start_action = tbl->action; + for (;;) { + lq_sta->action_counter++; + switch (tbl->action) { + case IWL_MIMO2_SWITCH_ANTENNA1: + case IWL_MIMO2_SWITCH_ANTENNA2: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n"); + + 
if (tx_chains_num <= 2) + break; + + if (window->success_ratio >= IWL_RS_GOOD_RATIO) + break; + + memcpy(search_tbl, tbl, sz); + if (iwl4965_rs_toggle_antenna(valid_tx_ant, + &search_tbl->current_rate, search_tbl)) { + update_search_tbl_counter = 1; + goto out; + } + break; + case IWL_MIMO2_SWITCH_SISO_A: + case IWL_MIMO2_SWITCH_SISO_B: + case IWL_MIMO2_SWITCH_SISO_C: + IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n"); + + /* Set up new search table for SISO */ + memcpy(search_tbl, tbl, sz); + + if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) + search_tbl->ant_type = ANT_A; + else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) + search_tbl->ant_type = ANT_B; + else + search_tbl->ant_type = ANT_C; + + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, + search_tbl->ant_type)) + break; + + ret = iwl4965_rs_switch_to_siso(priv, lq_sta, + conf, sta, + search_tbl, index); + if (!ret) + goto out; + + break; + + case IWL_MIMO2_SWITCH_GI: + if (!tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_20)) + break; + if (tbl->is_ht40 && !(ht_cap->cap & + IEEE80211_HT_CAP_SGI_40)) + break; + + IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n"); + + /* Set up new search table for MIMO2 */ + memcpy(search_tbl, tbl, sz); + search_tbl->is_SGI = !tbl->is_SGI; + iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl); + /* + * If active table already uses the fastest possible + * modulation (dual stream with short guard interval), + * and it's working well, there's no need to look + * for a better type of modulation! + */ + if (tbl->is_SGI) { + s32 tpt = lq_sta->last_tpt / 100; + if (tpt >= search_tbl->expected_tpt[index]) + break; + } + search_tbl->current_rate = + iwl4965_rate_n_flags_from_tbl(priv, search_tbl, + index, is_green); + update_search_tbl_counter = 1; + goto out; + + } + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_GI) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + + if (tbl->action == start_action) + break; + } + search_tbl->lq_type = LQ_NONE; + return 0; + out: + lq_sta->search_better_tbl = 1; + tbl->action++; + if (tbl->action > IWL_MIMO2_SWITCH_GI) + tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; + if (update_search_tbl_counter) + search_tbl->action = tbl->action; + + return 0; + +} + +/* + * Check whether we should continue using same modulation mode, or + * begin search for a new mode, based on: + * 1) # tx successes or failures while using this mode + * 2) # times calling this function + * 3) elapsed time in this mode (not used, for now) + */ +static void +iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) +{ + struct iwl_scale_tbl_info *tbl; + int i; + int active_tbl; + int flush_interval_passed = 0; + struct iwl_priv *priv; + + priv = lq_sta->drv; + active_tbl = lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + /* If we've been disallowing search, see if we should now allow it */ + if (lq_sta->stay_in_tbl) { + + /* Elapsed time using current modulation mode */ + if (lq_sta->flush_timer) + flush_interval_passed = + time_after(jiffies, + (unsigned long)(lq_sta->flush_timer + + IWL_RATE_SCALE_FLUSH_INTVL)); + + /* + * Check if we should allow search for new modulation mode. + * If many frames have failed or succeeded, or we've used + * this same modulation for a long time, allow search, and + * reset history stats that keep track of whether we should + * allow a new search. Also (below) reset all bitmaps and + * stats in active history. 
+ */ + if (force_search || + (lq_sta->total_failed > lq_sta->max_failure_limit) || + (lq_sta->total_success > lq_sta->max_success_limit) || + ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) + && (flush_interval_passed))) { + IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:", + lq_sta->total_failed, + lq_sta->total_success, + flush_interval_passed); + + /* Allow search for new mode */ + lq_sta->stay_in_tbl = 0; /* only place reset */ + lq_sta->total_failed = 0; + lq_sta->total_success = 0; + lq_sta->flush_timer = 0; + + /* + * Else if we've used this modulation mode enough repetitions + * (regardless of elapsed time or success/failure), reset + * history bitmaps and rate-specific stats for all rates in + * active table. + */ + } else { + lq_sta->table_count++; + if (lq_sta->table_count >= + lq_sta->table_count_limit) { + lq_sta->table_count = 0; + + IWL_DEBUG_RATE(priv, + "LQ: stay in table clear win\n"); + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } + + /* If transitioning to allow "search", reset all history + * bitmaps and stats in active table (this will become the new + * "search" table). */ + if (!lq_sta->stay_in_tbl) { + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + } + } +} + +/* + * setup rate table in uCode + * return rate_n_flags as used in the table + */ +static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int index, u8 is_green) +{ + u32 rate; + + /* Update uCode's rate table. */ + rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green); + iwl4965_rs_fill_link_cmd(priv, lq_sta, rate); + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); + + return rate; +} + +/* + * Do rate scaling and search for new modulation mode. + */ +static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv, + struct sk_buff *skb, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int low = IWL_RATE_INVALID; + int high = IWL_RATE_INVALID; + int index; + int i; + struct iwl_rate_scale_data *window = NULL; + int current_tpt = IWL_INVALID_VALUE; + int low_tpt = IWL_INVALID_VALUE; + int high_tpt = IWL_INVALID_VALUE; + u32 fail_count; + s8 scale_action = 0; + u16 rate_mask; + u8 update_lq = 0; + struct iwl_scale_tbl_info *tbl, *tbl1; + u16 rate_scale_index_msk = 0; + u32 rate; + u8 is_green = 0; + u8 active_tbl = 0; + u8 done_search = 0; + u16 high_low; + s32 sr; + u8 tid = MAX_TID_COUNT; + struct iwl_tid_data *tid_data; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n"); + + /* Send management frames and NO_ACK data using lowest rate. */ + /* TODO: this could probably be improved.. 
*/ + if (!ieee80211_is_data(hdr->frame_control) || + info->flags & IEEE80211_TX_CTL_NO_ACK) + return; + + if (!sta || !lq_sta) + return; + + lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; + + tid = iwl4965_rs_tl_add_packet(lq_sta, hdr); + if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { + tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IWL_AGG_OFF) + lq_sta->is_agg = 0; + else + lq_sta->is_agg = 1; + } else + lq_sta->is_agg = 0; + + /* + * Select rate-scale / modulation-mode table to work with in + * the rest of this function: "search" if searching for better + * modulation mode, or "active" if doing rate scaling within a mode. + */ + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + if (is_legacy(tbl->lq_type)) + lq_sta->is_green = 0; + else + lq_sta->is_green = iwl4965_rs_use_green(sta); + is_green = lq_sta->is_green; + + /* current tx rate */ + index = lq_sta->last_txrate_idx; + + IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index, + tbl->lq_type); + + /* rates available for this association, and for modulation mode */ + rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); + + IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask); + + /* mask with station rate restriction */ + if (is_legacy(tbl->lq_type)) { + if (lq_sta->band == IEEE80211_BAND_5GHZ) + /* supp_rates has no CCK bits in A mode */ + rate_scale_index_msk = (u16) (rate_mask & + (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); + else + rate_scale_index_msk = (u16) (rate_mask & + lq_sta->supp_rates); + + } else + rate_scale_index_msk = rate_mask; + + if (!rate_scale_index_msk) + rate_scale_index_msk = rate_mask; + + if (!((1 << index) & rate_scale_index_msk)) { + IWL_ERR(priv, "Current Rate is not valid\n"); + if (lq_sta->search_better_tbl) { + /* revert to active table if search table is not valid*/ + tbl->lq_type = LQ_NONE; + lq_sta->search_better_tbl = 0; + tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + /* get "active" rate info */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta, + tbl, index, is_green); + } + return; + } + + /* Get expected throughput table and history window for current rate */ + if (!tbl->expected_tpt) { + IWL_ERR(priv, "tbl->expected_tpt is NULL\n"); + return; + } + + /* force user max rate if set by user */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < index)) { + index = lq_sta->max_rate_idx; + update_lq = 1; + window = &(tbl->win[index]); + goto lq_update; + } + + window = &(tbl->win[index]); + + /* + * If there is not enough history to calculate actual average + * throughput, keep analyzing results of more tx frames, without + * changing rate or mode (bypass most of the rest of this function). + * Set up new rate table in uCode only if old rate is not supported + * in current association (use new rate found above). + */ + fail_count = window->counter - window->success_counter; + if ((fail_count < IWL_RATE_MIN_FAILURE_TH) && + (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) { + IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d " + "for index %d\n", + window->success_counter, window->counter, index); + + /* Can't calculate this yet; not enough history */ + window->average_tpt = IWL_INVALID_VALUE; + + /* Should we stay with this modulation mode, + * or search for a new one? 
*/ + iwl4965_rs_stay_in_table(lq_sta, false); + + goto out; + } + /* Else we have enough samples; calculate estimate of + * actual average throughput */ + if (window->average_tpt != ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128)) { + IWL_ERR(priv, + "expected_tpt should have been calculated by now\n"); + window->average_tpt = ((window->success_ratio * + tbl->expected_tpt[index] + 64) / 128); + } + + /* If we are searching for better modulation mode, check success. */ + if (lq_sta->search_better_tbl) { + /* If good success, continue using the "search" mode; + * no need to send new link quality command, since we're + * continuing to use the setup that we've been trying. */ + if (window->average_tpt > lq_sta->last_tpt) { + + IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + if (!is_legacy(tbl->lq_type)) + lq_sta->enable_counter = 1; + + /* Swap tables; "search" becomes "active" */ + lq_sta->active_tbl = active_tbl; + current_tpt = window->average_tpt; + + /* Else poor success; go back to mode in "active" table */ + } else { + + IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE " + "suc=%d cur-tpt=%d old-tpt=%d\n", + window->success_ratio, + window->average_tpt, + lq_sta->last_tpt); + + /* Nullify "search" table */ + tbl->lq_type = LQ_NONE; + + /* Revert to "active" table */ + active_tbl = lq_sta->active_tbl; + tbl = &(lq_sta->lq_info[active_tbl]); + + /* Revert to "active" rate and throughput info */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + current_tpt = lq_sta->last_tpt; + + /* Need to set up a new rate table in uCode */ + update_lq = 1; + } + + /* Either way, we've made a decision; modulation mode + * search is done, allow rate adjustment next time. */ + lq_sta->search_better_tbl = 0; + done_search = 1; /* Don't switch modes below! */ + goto lq_update; + } + + /* (Else) not in search of better modulation mode, try for better + * starting rate, while staying in this mode. */ + high_low = iwl4965_rs_get_adjacent_rate(priv, index, + rate_scale_index_msk, + tbl->lq_type); + low = high_low & 0xff; + high = (high_low >> 8) & 0xff; + + /* If user set max rate, dont allow higher than user constrain */ + if ((lq_sta->max_rate_idx != -1) && + (lq_sta->max_rate_idx < high)) + high = IWL_RATE_INVALID; + + sr = window->success_ratio; + + /* Collect measured throughputs for current and adjacent rates */ + current_tpt = window->average_tpt; + if (low != IWL_RATE_INVALID) + low_tpt = tbl->win[low].average_tpt; + if (high != IWL_RATE_INVALID) + high_tpt = tbl->win[high].average_tpt; + + scale_action = 0; + + /* Too many failures, decrease rate */ + if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low success_ratio\n"); + scale_action = -1; + + /* No throughput measured yet for adjacent rates; try increase. */ + } else if ((low_tpt == IWL_INVALID_VALUE) && + (high_tpt == IWL_INVALID_VALUE)) { + + if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) + scale_action = 1; + else if (low != IWL_RATE_INVALID) + scale_action = 0; + } + + /* Both adjacent throughputs are measured, but neither one has better + * throughput; we're using the best rate, don't change it! 
*/ + else if ((low_tpt != IWL_INVALID_VALUE) && + (high_tpt != IWL_INVALID_VALUE) && + (low_tpt < current_tpt) && + (high_tpt < current_tpt)) + scale_action = 0; + + /* At least one adjacent rate's throughput is measured, + * and may have better performance. */ + else { + /* Higher adjacent rate's throughput is measured */ + if (high_tpt != IWL_INVALID_VALUE) { + /* Higher rate has better throughput */ + if (high_tpt > current_tpt && + sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } else { + scale_action = 0; + } + + /* Lower adjacent rate's throughput is measured */ + } else if (low_tpt != IWL_INVALID_VALUE) { + /* Lower rate has better throughput */ + if (low_tpt > current_tpt) { + IWL_DEBUG_RATE(priv, + "decrease rate because of low tpt\n"); + scale_action = -1; + } else if (sr >= IWL_RATE_INCREASE_TH) { + scale_action = 1; + } + } + } + + /* Sanity check; asked for decrease, but success rate or throughput + * has been good at old rate. Don't change it. */ + if ((scale_action == -1) && (low != IWL_RATE_INVALID) && + ((sr > IWL_RATE_HIGH_TH) || + (current_tpt > (100 * tbl->expected_tpt[low])))) + scale_action = 0; + + switch (scale_action) { + case -1: + /* Decrease starting rate, update uCode's rate table */ + if (low != IWL_RATE_INVALID) { + update_lq = 1; + index = low; + } + + break; + case 1: + /* Increase starting rate, update uCode's rate table */ + if (high != IWL_RATE_INVALID) { + update_lq = 1; + index = high; + } + + break; + case 0: + /* No change */ + default: + break; + } + + IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d " + "high %d type %d\n", + index, scale_action, low, high, tbl->lq_type); + +lq_update: + /* Replace uCode's rate table for the destination station. */ + if (update_lq) + rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta, + tbl, index, is_green); + + /* Should we stay with this modulation mode, + * or search for a new one? */ + iwl4965_rs_stay_in_table(lq_sta, false); + + /* + * Search for new modulation mode if we're: + * 1) Not changing rates right now + * 2) Not just finishing up a search + * 3) Allowing a new search + */ + if (!update_lq && !done_search && + !lq_sta->stay_in_tbl && window->counter) { + /* Save current throughput to compare with "search" throughput*/ + lq_sta->last_tpt = current_tpt; + + /* Select a new "search" modulation mode to try. + * If one is found, set up the new "search" table. */ + if (is_legacy(tbl->lq_type)) + iwl4965_rs_move_legacy_other(priv, lq_sta, + conf, sta, index); + else if (is_siso(tbl->lq_type)) + iwl4965_rs_move_siso_to_other(priv, lq_sta, + conf, sta, index); + else /* (is_mimo2(tbl->lq_type)) */ + iwl4965_rs_move_mimo2_to_other(priv, lq_sta, + conf, sta, index); + + /* If new "search" mode was selected, set up in uCode table */ + if (lq_sta->search_better_tbl) { + /* Access the "search" table, clear its history. 
*/ + tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &(tbl->win[i])); + + /* Use new "search" start rate */ + index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate); + + IWL_DEBUG_RATE(priv, + "Switch current mcs: %X index: %d\n", + tbl->current_rate, index); + iwl4965_rs_fill_link_cmd(priv, lq_sta, + tbl->current_rate); + iwl_legacy_send_lq_cmd(priv, ctx, + &lq_sta->lq, CMD_ASYNC, false); + } else + done_search = 1; + } + + if (done_search && !lq_sta->stay_in_tbl) { + /* If the "active" (non-search) mode was legacy, + * and we've tried switching antennas, + * but we haven't been able to try HT modes (not available), + * stay with best antenna legacy modulation for a while + * before next round of mode comparisons. */ + tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); + if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) && + lq_sta->action_counter > tbl1->max_search) { + IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n"); + iwl4965_rs_set_stay_in_table(priv, 1, lq_sta); + } + + /* If we're in an HT mode, and all 3 mode switch actions + * have been tried and compared, stay in this best modulation + * mode for a while before next round of mode comparisons. */ + if (lq_sta->enable_counter && + (lq_sta->action_counter >= tbl1->max_search)) { + if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && + (lq_sta->tx_agg_tid_en & (1 << tid)) && + (tid != MAX_TID_COUNT)) { + tid_data = + &priv->stations[lq_sta->lq.sta_id].tid[tid]; + if (tid_data->agg.state == IWL_AGG_OFF) { + IWL_DEBUG_RATE(priv, + "try to aggregate tid %d\n", + tid); + iwl4965_rs_tl_turn_on_agg(priv, tid, + lq_sta, sta); + } + } + iwl4965_rs_set_stay_in_table(priv, 0, lq_sta); + } + } + +out: + tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, + index, is_green); + i = index; + lq_sta->last_txrate_idx = i; +} + +/** + * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table + * + * The uCode's station table contains a table of fallback rates + * for automatic fallback during transmission. + * + * NOTE: This sets up a default set of values. These will be replaced later + * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of + * rc80211_simple. + * + * NOTE: Run REPLY_ADD_STA command to set up station table entry, before + * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, + * which requires station table entry to exist). 
+ */ +static void iwl4965_rs_initialize_lq(struct iwl_priv *priv, + struct ieee80211_conf *conf, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_scale_tbl_info *tbl; + int rate_idx; + int i; + u32 rate; + u8 use_green = iwl4965_rs_use_green(sta); + u8 active_tbl = 0; + u8 valid_tx_ant; + struct iwl_station_priv *sta_priv; + struct iwl_rxon_context *ctx; + + if (!sta || !lq_sta) + return; + + sta_priv = (void *)sta->drv_priv; + ctx = sta_priv->common.ctx; + + i = lq_sta->last_txrate_idx; + + valid_tx_ant = priv->hw_params.valid_tx_ant; + + if (!lq_sta->search_better_tbl) + active_tbl = lq_sta->active_tbl; + else + active_tbl = 1 - lq_sta->active_tbl; + + tbl = &(lq_sta->lq_info[active_tbl]); + + if ((i < 0) || (i >= IWL_RATE_COUNT)) + i = 0; + + rate = iwlegacy_rates[i].plcp; + tbl->ant_type = iwl4965_first_antenna(valid_tx_ant); + rate |= tbl->ant_type << RATE_MCS_ANT_POS; + + if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE) + rate |= RATE_MCS_CCK_MSK; + + iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx); + if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type)) + iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl); + + rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green); + tbl->current_rate = rate; + iwl4965_rs_set_expected_tpt_table(lq_sta, tbl); + iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate); + priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq; + iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true); +} + +static void +iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, + struct ieee80211_tx_rate_control *txrc) +{ + + struct sk_buff *skb = txrc->skb; + struct ieee80211_supported_band *sband = txrc->sband; + struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_lq_sta *lq_sta = priv_sta; + int rate_idx; + + IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); + + /* Get max rate if user set max rate */ + if (lq_sta) { + lq_sta->max_rate_idx = txrc->max_rate_idx; + if ((sband->band == IEEE80211_BAND_5GHZ) && + (lq_sta->max_rate_idx != -1)) + lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; + if ((lq_sta->max_rate_idx < 0) || + (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) + lq_sta->max_rate_idx = -1; + } + + /* Treat uninitialized rate scaling data same as non-existing. */ + if (lq_sta && !lq_sta->drv) { + IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); + priv_sta = NULL; + } + + /* Send management frames and NO_ACK data using lowest rate. */ + if (rate_control_send_low(sta, priv_sta, txrc)) + return; + + rate_idx = lq_sta->last_txrate_idx; + + if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { + rate_idx -= IWL_FIRST_OFDM_RATE; + /* 6M and 9M shared same MCS index */ + rate_idx = (rate_idx > 0) ? 
(rate_idx - 1) : 0; + if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >= + IWL_RATE_MIMO2_6M_PLCP) + rate_idx = rate_idx + MCS_INDEX_PER_STREAM; + info->control.rates[0].flags = IEEE80211_TX_RC_MCS; + if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_SHORT_GI; + if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_DUP_DATA; + if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_40_MHZ_WIDTH; + if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK) + info->control.rates[0].flags |= + IEEE80211_TX_RC_GREEN_FIELD; + } else { + /* Check for invalid rates */ + if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) || + ((sband->band == IEEE80211_BAND_5GHZ) && + (rate_idx < IWL_FIRST_OFDM_RATE))) + rate_idx = rate_lowest_index(sband, sta); + /* On valid 5 GHz rate, adjust index */ + else if (sband->band == IEEE80211_BAND_5GHZ) + rate_idx -= IWL_FIRST_OFDM_RATE; + info->control.rates[0].flags = 0; + } + info->control.rates[0].idx = rate_idx; + +} + +static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, + gfp_t gfp) +{ + struct iwl_lq_sta *lq_sta; + struct iwl_station_priv *sta_priv = + (struct iwl_station_priv *) sta->drv_priv; + struct iwl_priv *priv; + + priv = (struct iwl_priv *)priv_rate; + IWL_DEBUG_RATE(priv, "create station rate scale window\n"); + + lq_sta = &sta_priv->lq_sta; + + return lq_sta; +} + +/* + * Called after adding a new station to initialize rate scaling + */ +void +iwl4965_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, + u8 sta_id) +{ + int i, j; + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_conf *conf = &priv->hw->conf; + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + struct iwl_station_priv *sta_priv; + struct iwl_lq_sta *lq_sta; + struct ieee80211_supported_band *sband; + + sta_priv = (struct iwl_station_priv *) sta->drv_priv; + lq_sta = &sta_priv->lq_sta; + sband = hw->wiphy->bands[conf->channel->band]; + + + lq_sta->lq.sta_id = sta_id; + + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &lq_sta->lq_info[j].win[i]); + + lq_sta->flush_timer = 0; + lq_sta->supp_rates = sta->supp_rates[sband->band]; + for (j = 0; j < LQ_SIZE; j++) + for (i = 0; i < IWL_RATE_COUNT; i++) + iwl4965_rs_rate_scale_clear_window( + &lq_sta->lq_info[j].win[i]); + + IWL_DEBUG_RATE(priv, "LQ:" + "*** rate scale station global init for station %d ***\n", + sta_id); + /* TODO: what is a good starting rate for STA? About middle? Maybe not + * the lowest or the highest rate.. Could consider using RSSI from + * previous packets? Need to have IEEE 802.1X auth succeed immediately + * after assoc.. */ + + lq_sta->is_dup = 0; + lq_sta->max_rate_idx = -1; + lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; + lq_sta->is_green = iwl4965_rs_use_green(sta); + lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); + lq_sta->band = priv->band; + /* + * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), + * supp_rates[] does not; shift to convert format, force 9 MBits off. 
+ */ + lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1; + lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1; + lq_sta->active_siso_rate &= ~((u16)0x2); + lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE; + + /* Same here */ + lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1; + lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1; + lq_sta->active_mimo2_rate &= ~((u16)0x2); + lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; + + /* These values will be overridden later */ + lq_sta->lq.general_params.single_stream_ant_msk = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + lq_sta->lq.general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant & + ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + if (!lq_sta->lq.general_params.dual_stream_ant_msk) { + lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; + } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { + lq_sta->lq.general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant; + } + + /* as default allow aggregation for all tids */ + lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; + lq_sta->drv = priv; + + /* Set last_txrate_idx to lowest rate */ + lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); + if (sband->band == IEEE80211_BAND_5GHZ) + lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; + lq_sta->is_agg = 0; + +#ifdef CONFIG_MAC80211_DEBUGFS + lq_sta->dbg_fixed_rate = 0; +#endif + + iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta); +} + +static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta, u32 new_rate) +{ + struct iwl_scale_tbl_info tbl_type; + int index = 0; + int rate_idx; + int repeat_rate = 0; + u8 ant_toggle_cnt = 0; + u8 use_ht_possible = 1; + u8 valid_tx_ant = 0; + struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; + + /* Override starting rate (index 0) if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Interpret new_rate (rate_n_flags) */ + iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, + &tbl_type, &rate_idx); + + /* How many times should we repeat the initial rate? */ + if (is_legacy(tbl_type.lq_type)) { + ant_toggle_cnt = 1; + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + lq_cmd->general_params.mimo_delimiter = + is_mimo(tbl_type.lq_type) ? 1 : 0; + + /* Fill 1st table entry (index 0) */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) { + lq_cmd->general_params.single_stream_ant_msk = + tbl_type.ant_type; + } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) { + lq_cmd->general_params.dual_stream_ant_msk = + tbl_type.ant_type; + } /* otherwise we don't modify the existing value */ + + index++; + repeat_rate--; + if (priv) + valid_tx_ant = priv->hw_params.valid_tx_ant; + + /* Fill rest of rate table */ + while (index < LINK_QUAL_MAX_RETRY_NUM) { + /* Repeat initial/next rate. + * For legacy IWL_NUMBER_TRY == 1, this loop will not execute. + * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. 
*/ + while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) { + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + iwl4965_rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + } + + /* Override next rate if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = + cpu_to_le32(new_rate); + repeat_rate--; + index++; + } + + iwl4965_rs_get_tbl_info_from_mcs(new_rate, + lq_sta->band, &tbl_type, + &rate_idx); + + /* Indicate to uCode which entries might be MIMO. + * If initial rate was MIMO, this will finally end up + * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */ + if (is_mimo(tbl_type.lq_type)) + lq_cmd->general_params.mimo_delimiter = index; + + /* Get next rate */ + new_rate = iwl4965_rs_get_lower_rate(lq_sta, + &tbl_type, rate_idx, + use_ht_possible); + + /* How many times should we repeat the next rate? */ + if (is_legacy(tbl_type.lq_type)) { + if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE) + ant_toggle_cnt++; + else if (priv && + iwl4965_rs_toggle_antenna(valid_tx_ant, + &new_rate, &tbl_type)) + ant_toggle_cnt = 1; + + repeat_rate = IWL_NUMBER_TRY; + } else { + repeat_rate = IWL_HT_NUMBER_TRY; + } + + /* Don't allow HT rates after next pass. + * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */ + use_ht_possible = 0; + + /* Override next rate if needed for debug purposes */ + iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index); + + /* Fill next table entry */ + lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate); + + index++; + repeat_rate--; + } + + lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + + lq_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); +} + +static void +*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) +{ + return hw->priv; +} +/* rate scale requires free function to be implemented */ +static void iwl4965_rs_free(void *priv_rate) +{ + return; +} + +static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta, + void *priv_sta) +{ + struct iwl_priv *priv __maybe_unused = priv_r; + + IWL_DEBUG_RATE(priv, "enter\n"); + IWL_DEBUG_RATE(priv, "leave\n"); +} + + +#ifdef CONFIG_MAC80211_DEBUGFS +static int iwl4965_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} +static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, + u32 *rate_n_flags, int index) +{ + struct iwl_priv *priv; + u8 valid_tx_ant; + u8 ant_sel_tx; + + priv = lq_sta->drv; + valid_tx_ant = priv->hw_params.valid_tx_ant; + if (lq_sta->dbg_fixed_rate) { + ant_sel_tx = + ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) + >> RATE_MCS_ANT_POS); + if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) { + *rate_n_flags = lq_sta->dbg_fixed_rate; + IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); + } else { + lq_sta->dbg_fixed_rate = 0; + IWL_ERR(priv, + "Invalid antenna selection 0x%X, Valid is 0x%X\n", + ant_sel_tx, valid_tx_ant); + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } + } else { + IWL_DEBUG_RATE(priv, "Fixed rate OFF\n"); + } +} + +static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + char buf[64]; + 
int buf_size; + u32 parsed_rate; + struct iwl_station_priv *sta_priv = + container_of(lq_sta, struct iwl_station_priv, lq_sta); + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + priv = lq_sta->drv; + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x", &parsed_rate) == 1) + lq_sta->dbg_fixed_rate = parsed_rate; + else + lq_sta->dbg_fixed_rate = 0; + + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", + lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); + + if (lq_sta->dbg_fixed_rate) { + iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); + iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, + false); + } + + return count; +} + +static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i = 0; + int index = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); + + priv = lq_sta->drv; + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); + desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", + lq_sta->total_failed, lq_sta->total_success, + lq_sta->active_legacy_rate); + desc += sprintf(buff+desc, "fixed rate 0x%X\n", + lq_sta->dbg_fixed_rate); + desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", + (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", + (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", + (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); + desc += sprintf(buff+desc, "lq type %s\n", + (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); + if (is_Ht(tbl->lq_type)) { + desc += sprintf(buff+desc, " %s", + (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2"); + desc += sprintf(buff+desc, " %s", + (tbl->is_ht40) ? "40MHz" : "20MHz"); + desc += sprintf(buff+desc, " %s %s %s\n", + (tbl->is_SGI) ? "SGI" : "", + (lq_sta->is_green) ? "GF enabled" : "", + (lq_sta->is_agg) ? 
"AGG on" : ""); + } + desc += sprintf(buff+desc, "last tx rate=0x%X\n", + lq_sta->last_rate_n_flags); + desc += sprintf(buff+desc, "general:" + "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n", + lq_sta->lq.general_params.flags, + lq_sta->lq.general_params.mimo_delimiter, + lq_sta->lq.general_params.single_stream_ant_msk, + lq_sta->lq.general_params.dual_stream_ant_msk); + + desc += sprintf(buff+desc, "agg:" + "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n", + le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit), + lq_sta->lq.agg_params.agg_dis_start_th, + lq_sta->lq.agg_params.agg_frame_cnt_limit); + + desc += sprintf(buff+desc, + "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", + lq_sta->lq.general_params.start_rate_index[0], + lq_sta->lq.general_params.start_rate_index[1], + lq_sta->lq.general_params.start_rate_index[2], + lq_sta->lq.general_params.start_rate_index[3]); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + index = iwl4965_hwrate_to_plcp_idx( + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); + if (is_legacy(tbl->lq_type)) { + desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", + i, + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps); + } else { + desc += sprintf(buff+desc, + " rate[%d] 0x%X %smbps (%s)\n", + i, + le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), + iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs); + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_scale_table_ops = { + .write = iwl4965_rs_sta_dbgfs_scale_table_write, + .read = iwl4965_rs_sta_dbgfs_scale_table_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; +static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char *buff; + int desc = 0; + int i, j; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + + buff = kmalloc(1024, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + for (i = 0; i < LQ_SIZE; i++) { + desc += sprintf(buff+desc, + "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n" + "rate=0x%X\n", + lq_sta->active_tbl == i ? 
"*" : "x", + lq_sta->lq_info[i].lq_type, + lq_sta->lq_info[i].is_SGI, + lq_sta->lq_info[i].is_ht40, + lq_sta->lq_info[i].is_dup, + lq_sta->is_green, + lq_sta->lq_info[i].current_rate); + for (j = 0; j < IWL_RATE_COUNT; j++) { + desc += sprintf(buff+desc, + "counter=%d success=%d %%=%d\n", + lq_sta->lq_info[i].win[j].counter, + lq_sta->lq_info[i].win[j].success_counter, + lq_sta->lq_info[i].win[j].success_ratio); + } + } + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + kfree(buff); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_stats_table_ops = { + .read = iwl4965_rs_sta_dbgfs_stats_table_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; + +static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file, + char __user *user_buf, size_t count, loff_t *ppos) +{ + char buff[120]; + int desc = 0; + ssize_t ret; + + struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_priv *priv; + struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; + + priv = lq_sta->drv; + + if (is_Ht(tbl->lq_type)) + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + tbl->expected_tpt[lq_sta->last_txrate_idx]); + else + desc += sprintf(buff+desc, + "Bit Rate= %d Mb/s\n", + iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1); + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); + return ret; +} + +static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { + .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read, + .open = iwl4965_open_file_generic, + .llseek = default_llseek, +}; + +static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta, + struct dentry *dir) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + lq_sta->rs_sta_dbgfs_scale_table_file = + debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_scale_table_ops); + lq_sta->rs_sta_dbgfs_stats_table_file = + debugfs_create_file("rate_stats_table", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_stats_table_ops); + lq_sta->rs_sta_dbgfs_rate_scale_data_file = + debugfs_create_file("rate_scale_data", S_IRUSR, dir, + lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); + lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = + debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, + &lq_sta->tx_agg_tid_en); + +} + +static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta) +{ + struct iwl_lq_sta *lq_sta = priv_sta; + debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); +} +#endif + +/* + * Initialization of rate scaling information is done by driver after + * the station is added. Since mac80211 calls this function before a + * station is added we ignore it. 
+ */ +static void +iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband, + struct ieee80211_sta *sta, void *priv_sta) +{ +} +static struct rate_control_ops rs_4965_ops = { + .module = NULL, + .name = IWL4965_RS_NAME, + .tx_status = iwl4965_rs_tx_status, + .get_rate = iwl4965_rs_get_rate, + .rate_init = iwl4965_rs_rate_init_stub, + .alloc = iwl4965_rs_alloc, + .free = iwl4965_rs_free, + .alloc_sta = iwl4965_rs_alloc_sta, + .free_sta = iwl4965_rs_free_sta, +#ifdef CONFIG_MAC80211_DEBUGFS + .add_sta_debugfs = iwl4965_rs_add_debugfs, + .remove_sta_debugfs = iwl4965_rs_remove_debugfs, +#endif +}; + +int iwl4965_rate_control_register(void) +{ + pr_err("Registering 4965 rate control operations\n"); + return ieee80211_rate_control_register(&rs_4965_ops); +} + +void iwl4965_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&rs_4965_ops); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c index bbd40b7dd597..b9fa2f6411a7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c +++ b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c @@ -2,7 +2,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,14 +34,14 @@ #include "iwl-dev.h" #include "iwl-core.h" -#include "iwl-agn-calib.h" +#include "iwl-4965-calib.h" #include "iwl-sta.h" #include "iwl-io.h" #include "iwl-helpers.h" -#include "iwl-agn-hw.h" -#include "iwl-agn.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" -void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, +void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { @@ -58,14 +58,14 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, le32_to_cpu(missed_beacon->num_recvd_beacons), le32_to_cpu(missed_beacon->num_expected_beacons)); if (!test_bit(STATUS_SCANNING, &priv->status)) - iwl_init_sensitivity(priv); + iwl4965_init_sensitivity(priv); } } /* Calculate noise level, based on measurements during network silence just * before arriving beacon. This measurement can be done only if we know * exactly when to expect beacons, therefore only when we're associated. */ -static void iwl_rx_calc_noise(struct iwl_priv *priv) +static void iwl4965_rx_calc_noise(struct iwl_priv *priv) { struct statistics_rx_non_phy *rx_info; int num_active_rx = 0; @@ -73,11 +73,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv) int bcn_silence_a, bcn_silence_b, bcn_silence_c; int last_rx_noise; - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) - rx_info = &(priv->_agn.statistics_bt.rx.general.common); - else - rx_info = &(priv->_agn.statistics.rx.general); + rx_info = &(priv->_4965.statistics.rx.general); bcn_silence_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; bcn_silence_b = @@ -109,13 +105,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv) last_rx_noise); } -#ifdef CONFIG_IWLWIFI_DEBUGFS +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS /* * based on the assumption of all statistics counter are in DWORD * FIXME: This function is for debugging, do not deal with * the case of counters roll-over. 
*/ -static void iwl_accumulative_statistics(struct iwl_priv *priv, +static void iwl4965_accumulative_statistics(struct iwl_priv *priv, __le32 *stats) { int i, size; @@ -125,28 +121,16 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, struct statistics_general_common *general, *accum_general; struct statistics_tx *tx, *accum_tx; - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { - prev_stats = (__le32 *)&priv->_agn.statistics_bt; - accum_stats = (u32 *)&priv->_agn.accum_statistics_bt; - size = sizeof(struct iwl_bt_notif_statistics); - general = &priv->_agn.statistics_bt.general.common; - accum_general = &priv->_agn.accum_statistics_bt.general.common; - tx = &priv->_agn.statistics_bt.tx; - accum_tx = &priv->_agn.accum_statistics_bt.tx; - delta = (u32 *)&priv->_agn.delta_statistics_bt; - max_delta = (u32 *)&priv->_agn.max_delta_bt; - } else { - prev_stats = (__le32 *)&priv->_agn.statistics; - accum_stats = (u32 *)&priv->_agn.accum_statistics; - size = sizeof(struct iwl_notif_statistics); - general = &priv->_agn.statistics.general.common; - accum_general = &priv->_agn.accum_statistics.general.common; - tx = &priv->_agn.statistics.tx; - accum_tx = &priv->_agn.accum_statistics.tx; - delta = (u32 *)&priv->_agn.delta_statistics; - max_delta = (u32 *)&priv->_agn.max_delta; - } + prev_stats = (__le32 *)&priv->_4965.statistics; + accum_stats = (u32 *)&priv->_4965.accum_statistics; + size = sizeof(struct iwl_notif_statistics); + general = &priv->_4965.statistics.general.common; + accum_general = &priv->_4965.accum_statistics.general.common; + tx = &priv->_4965.statistics.tx; + accum_tx = &priv->_4965.accum_statistics.tx; + delta = (u32 *)&priv->_4965.delta_statistics; + max_delta = (u32 *)&priv->_4965.max_delta; + for (i = sizeof(__le32); i < size; i += sizeof(__le32), stats++, prev_stats++, delta++, max_delta++, accum_stats++) { @@ -161,23 +145,19 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv, /* reset accumulative statistics for "no-counter" type statistics */ accum_general->temperature = general->temperature; - accum_general->temperature_m = general->temperature_m; accum_general->ttl_timestamp = general->ttl_timestamp; - accum_tx->tx_power.ant_a = tx->tx_power.ant_a; - accum_tx->tx_power.ant_b = tx->tx_power.ant_b; - accum_tx->tx_power.ant_c = tx->tx_power.ant_c; } #endif #define REG_RECALIB_PERIOD (60) /** - * iwl_good_plcp_health - checks for plcp error. + * iwl4965_good_plcp_health - checks for plcp error. * * When the plcp error is exceeding the thresholds, reset the radio * to improve the throughput. */ -bool iwl_good_plcp_health(struct iwl_priv *priv, +bool iwl4965_good_plcp_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt) { bool rc = true; @@ -207,28 +187,15 @@ bool iwl_good_plcp_health(struct iwl_priv *priv, struct statistics_rx_phy *ofdm; struct statistics_rx_ht_phy *ofdm_ht; - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { - ofdm = &pkt->u.stats_bt.rx.ofdm; - ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht; - combined_plcp_delta = - (le32_to_cpu(ofdm->plcp_err) - - le32_to_cpu(priv->_agn.statistics_bt. - rx.ofdm.plcp_err)) + - (le32_to_cpu(ofdm_ht->plcp_err) - - le32_to_cpu(priv->_agn.statistics_bt. - rx.ofdm_ht.plcp_err)); - } else { - ofdm = &pkt->u.stats.rx.ofdm; - ofdm_ht = &pkt->u.stats.rx.ofdm_ht; - combined_plcp_delta = - (le32_to_cpu(ofdm->plcp_err) - - le32_to_cpu(priv->_agn.statistics. - rx.ofdm.plcp_err)) + - (le32_to_cpu(ofdm_ht->plcp_err) - - le32_to_cpu(priv->_agn.statistics. 
- rx.ofdm_ht.plcp_err)); - } + ofdm = &pkt->u.stats.rx.ofdm; + ofdm_ht = &pkt->u.stats.rx.ofdm_ht; + combined_plcp_delta = + (le32_to_cpu(ofdm->plcp_err) - + le32_to_cpu(priv->_4965.statistics. + rx.ofdm.plcp_err)) + + (le32_to_cpu(ofdm_ht->plcp_err) - + le32_to_cpu(priv->_4965.statistics. + rx.ofdm_ht.plcp_err)); if ((combined_plcp_delta > 0) && ((combined_plcp_delta * 100) / plcp_msec) > @@ -259,58 +226,32 @@ bool iwl_good_plcp_health(struct iwl_priv *priv, return rc; } -void iwl_rx_statistics(struct iwl_priv *priv, +void iwl4965_rx_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { int change; struct iwl_rx_packet *pkt = rxb_addr(rxb); - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { - IWL_DEBUG_RX(priv, - "Statistics notification received (%d vs %d).\n", - (int)sizeof(struct iwl_bt_notif_statistics), - le32_to_cpu(pkt->len_n_flags) & - FH_RSCSR_FRAME_SIZE_MSK); - - change = ((priv->_agn.statistics_bt.general.common.temperature != - pkt->u.stats_bt.general.common.temperature) || - ((priv->_agn.statistics_bt.flag & - STATISTICS_REPLY_FLG_HT40_MODE_MSK) != - (pkt->u.stats_bt.flag & - STATISTICS_REPLY_FLG_HT40_MODE_MSK))); -#ifdef CONFIG_IWLWIFI_DEBUGFS - iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt); + IWL_DEBUG_RX(priv, + "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & + FH_RSCSR_FRAME_SIZE_MSK); + + change = ((priv->_4965.statistics.general.common.temperature != + pkt->u.stats.general.common.temperature) || + ((priv->_4965.statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK))); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); #endif - } else { - IWL_DEBUG_RX(priv, - "Statistics notification received (%d vs %d).\n", - (int)sizeof(struct iwl_notif_statistics), - le32_to_cpu(pkt->len_n_flags) & - FH_RSCSR_FRAME_SIZE_MSK); + iwl_legacy_recover_from_statistics(priv, pkt); - change = ((priv->_agn.statistics.general.common.temperature != - pkt->u.stats.general.common.temperature) || - ((priv->_agn.statistics.flag & - STATISTICS_REPLY_FLG_HT40_MODE_MSK) != - (pkt->u.stats.flag & - STATISTICS_REPLY_FLG_HT40_MODE_MSK))); -#ifdef CONFIG_IWLWIFI_DEBUGFS - iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); -#endif - - } - - iwl_recover_from_statistics(priv, pkt); - - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) - memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt, - sizeof(priv->_agn.statistics_bt)); - else - memcpy(&priv->_agn.statistics, &pkt->u.stats, - sizeof(priv->_agn.statistics)); + memcpy(&priv->_4965.statistics, &pkt->u.stats, + sizeof(priv->_4965.statistics)); set_bit(STATUS_STATISTICS, &priv->status); @@ -323,34 +264,28 @@ void iwl_rx_statistics(struct iwl_priv *priv, if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { - iwl_rx_calc_noise(priv); + iwl4965_rx_calc_noise(priv); queue_work(priv->workqueue, &priv->run_time_calib_work); } if (priv->cfg->ops->lib->temp_ops.temperature && change) priv->cfg->ops->lib->temp_ops.temperature(priv); } -void iwl_reply_statistics(struct iwl_priv *priv, +void iwl4965_reply_statistics(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { -#ifdef CONFIG_IWLWIFI_DEBUGFS - memset(&priv->_agn.accum_statistics, 0, 
+#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + memset(&priv->_4965.accum_statistics, 0, sizeof(struct iwl_notif_statistics)); - memset(&priv->_agn.delta_statistics, 0, + memset(&priv->_4965.delta_statistics, 0, sizeof(struct iwl_notif_statistics)); - memset(&priv->_agn.max_delta, 0, + memset(&priv->_4965.max_delta, 0, sizeof(struct iwl_notif_statistics)); - memset(&priv->_agn.accum_statistics_bt, 0, - sizeof(struct iwl_bt_notif_statistics)); - memset(&priv->_agn.delta_statistics_bt, 0, - sizeof(struct iwl_bt_notif_statistics)); - memset(&priv->_agn.max_delta_bt, 0, - sizeof(struct iwl_bt_notif_statistics)); #endif IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); } - iwl_rx_statistics(priv, rxb); + iwl4965_rx_statistics(priv, rxb); } diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c new file mode 100644 index 000000000000..a262c23553d2 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c @@ -0,0 +1,721 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <net/mac80211.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-4965.h" + +static struct iwl_link_quality_cmd * +iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id) +{ + int i, r; + struct iwl_link_quality_cmd *link_cmd; + u32 rate_flags = 0; + __le32 rate_n_flags; + + link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL); + if (!link_cmd) { + IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n"); + return NULL; + } + /* Set up the rate scaling to start at selected rate, fall back + * all the way down to 1M in IEEE order, and then spin on 1M */ + if (priv->band == IEEE80211_BAND_5GHZ) + r = IWL_RATE_6M_INDEX; + else + r = IWL_RATE_1M_INDEX; + + if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) + rate_flags |= RATE_MCS_CCK_MSK; + + rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) << + RATE_MCS_ANT_POS; + rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp, + rate_flags); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + link_cmd->rs_table[i].rate_n_flags = rate_n_flags; + + link_cmd->general_params.single_stream_ant_msk = + iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + + link_cmd->general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant & + ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant); + if (!link_cmd->general_params.dual_stream_ant_msk) { + link_cmd->general_params.dual_stream_ant_msk = ANT_AB; + } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) { + link_cmd->general_params.dual_stream_ant_msk = + priv->hw_params.valid_tx_ant; + } + + link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; + link_cmd->agg_params.agg_time_limit = + cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + link_cmd->sta_id = sta_id; + + return link_cmd; +} + +/* + * iwl4965_add_bssid_station - Add the special IBSS BSSID station + * + * Function sleeps. 
+ */ +int +iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r) +{ + int ret; + u8 sta_id; + struct iwl_link_quality_cmd *link_cmd; + unsigned long flags; + + if (sta_id_r) + *sta_id_r = IWL_INVALID_STATION; + + ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM\n", addr); + return ret; + } + + if (sta_id_r) + *sta_id_r = sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].used |= IWL_STA_LOCAL; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + /* Set up default rate scaling table in device's station table */ + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for station %pM.\n", + addr); + return -ENOMEM; + } + + ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true); + if (ret) + IWL_ERR(priv, "Link quality command failed (%d)\n", ret); + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + bool send_if_empty) +{ + int i, not_empty = 0; + u8 buff[sizeof(struct iwl_wep_cmd) + + sizeof(struct iwl_wep_key) * WEP_KEYS_MAX]; + struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff; + size_t cmd_size = sizeof(struct iwl_wep_cmd); + struct iwl_host_cmd cmd = { + .id = ctx->wep_key_cmd, + .data = wep_cmd, + .flags = CMD_SYNC, + }; + + might_sleep(); + + memset(wep_cmd, 0, cmd_size + + (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); + + for (i = 0; i < WEP_KEYS_MAX ; i++) { + wep_cmd->key[i].key_index = i; + if (ctx->wep_keys[i].key_size) { + wep_cmd->key[i].key_offset = i; + not_empty = 1; + } else { + wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET; + } + + wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size; + memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key, + ctx->wep_keys[i].key_size); + } + + wep_cmd->global_key_type = WEP_KEY_WEP_TYPE; + wep_cmd->num_keys = WEP_KEYS_MAX; + + cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; + + cmd.len = cmd_size; + + if (not_empty || send_if_empty) + return iwl_legacy_send_cmd(priv, &cmd); + else + return 0; +} + +int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + lockdep_assert_held(&priv->mutex); + + return iwl4965_static_wepkey_cmd(priv, ctx, false); +} + +int iwl4965_remove_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", + keyconf->keyidx); + + memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, + "Not sending REPLY_WEPKEY command due to RFKILL.\n"); + /* but keys in device are clear anyway so return success */ + return 0; + } + ret = iwl4965_static_wepkey_cmd(priv, ctx, 1); + IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", + keyconf->keyidx, ret); + + return ret; +} + +int iwl4965_set_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (keyconf->keylen != WEP_KEY_LEN_128 && + keyconf->keylen != WEP_KEY_LEN_64) { + IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen); + 
return -EINVAL; + } + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->hw_key_idx = HW_KEY_DEFAULT; + priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher; + + ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; + memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key, + keyconf->keylen); + + ret = iwl4965_static_wepkey_cmd(priv, ctx, false); + IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", + keyconf->keylen, keyconf->keyidx, ret); + + return ret; +} + +static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV; + + key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (keyconf->keylen == WEP_KEY_LEN_128) + key_flags |= STA_KEY_FLG_KEY_SIZE_MSK; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx; + + memcpy(priv->stations[sta_id].keyinfo.key, + keyconf->key, keyconf->keylen); + + memcpy(&priv->stations[sta_id].sta.key.key[3], + keyconf->key, keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocate room + * in uCode. */ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + __le16 key_flags = 0; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; + + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, + keyconf->keylen); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, + keyconf->keylen); + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocate room + * in uCode. 
*/ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + int ret = 0; + __le16 key_flags = 0; + + key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK); + key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); + key_flags &= ~STA_KEY_FLG_INVALID; + + if (sta_id == ctx->bcast_sta_id) + key_flags |= STA_KEY_MULTICAST_MSK; + + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; + priv->stations[sta_id].keyinfo.keylen = 16; + + if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) + == STA_KEY_FLG_NO_ENC) + priv->stations[sta_id].sta.key.key_offset = + iwl_legacy_get_free_ucode_key_index(priv); + /* else, we are overriding an existing key => no need to allocate room + * in uCode. */ + + WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, + "no space for a new key"); + + priv->stations[sta_id].sta.key.key_flags = key_flags; + + + /* This copy is actually not needed: we get the key with each TX */ + memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16); + + memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +void iwl4965_update_tkip_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 *phase1key) +{ + u8 sta_id; + unsigned long flags; + int i; + + if (iwl_legacy_scan_cancel(priv)) { + /* cancel scan failed, just live w/ bad key and rely + briefly on SW decryption */ + return; + } + + sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta); + if (sta_id == IWL_INVALID_STATION) + return; + + spin_lock_irqsave(&priv->sta_lock, flags); + + priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32; + + for (i = 0; i < 5; i++) + priv->stations[sta_id].sta.key.tkip_rx_ttak[i] = + cpu_to_le16(phase1key[i]); + + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + +} + +int iwl4965_remove_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + u8 sta_id) +{ + unsigned long flags; + u16 key_flags; + u8 keyidx; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + ctx->key_mapping_keys--; + + spin_lock_irqsave(&priv->sta_lock, flags); + key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags); + keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3; + + IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n", + keyconf->keyidx, sta_id); + + if (keyconf->keyidx != keyidx) { + /* We need to remove a key with index different 
than the one + * in the uCode. This means that the key we need to remove has + * been replaced by another one with different index. + * Don't do anything and return ok + */ + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) { + IWL_WARN(priv, "Removing wrong key %d 0x%x\n", + keyconf->keyidx, key_flags); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset, + &priv->ucode_key_table)) + IWL_ERR(priv, "index %d not used in uCode key table.\n", + priv->stations[sta_id].sta.key.key_offset); + memset(&priv->stations[sta_id].keyinfo, 0, + sizeof(struct iwl_hw_key)); + memset(&priv->stations[sta_id].sta.key, 0, + sizeof(struct iwl4965_keyinfo)); + priv->stations[sta_id].sta.key.key_flags = + STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID; + priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_WEP(priv, + "Not sending REPLY_ADD_STA command because RFKILL enabled.\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, u8 sta_id) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + ctx->key_mapping_keys++; + keyconf->hw_key_idx = HW_KEY_DYNAMIC; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_TKIP: + ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + ret = iwl4965_set_wep_dynamic_key_info(priv, ctx, + keyconf, sta_id); + break; + default: + IWL_ERR(priv, + "Unknown alg: %s cipher = %x\n", __func__, + keyconf->cipher); + ret = -EINVAL; + } + + IWL_DEBUG_WEP(priv, + "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n", + keyconf->cipher, keyconf->keylen, keyconf->keyidx, + sta_id, ret); + + return ret; +} + +/** + * iwl4965_alloc_bcast_station - add broadcast station into driver's station table. + * + * This adds the broadcast station into the driver's station table + * and marks it driver active, so that it will be restored to the + * device at the next best time. 
+ */ +int iwl4965_alloc_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_link_quality_cmd *link_cmd; + unsigned long flags; + u8 sta_id; + + spin_lock_irqsave(&priv->sta_lock, flags); + sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr, + false, NULL); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare broadcast station\n"); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return -EINVAL; + } + + priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used |= IWL_STA_BCAST; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +/** + * iwl4965_update_bcast_station - update broadcast station's LQ command + * + * Only used by iwl4965. Placed here to have all bcast station management + * code together. + */ +static int iwl4965_update_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + unsigned long flags; + struct iwl_link_quality_cmd *link_cmd; + u8 sta_id = ctx->bcast_sta_id; + + link_cmd = iwl4965_sta_alloc_lq(priv, sta_id); + if (!link_cmd) { + IWL_ERR(priv, + "Unable to initialize rate scaling for bcast station.\n"); + return -ENOMEM; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + if (priv->stations[sta_id].lq) + kfree(priv->stations[sta_id].lq); + else + IWL_DEBUG_INFO(priv, + "Bcast station rate scaling has not been initialized yet.\n"); + priv->stations[sta_id].lq = link_cmd; + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return 0; +} + +int iwl4965_update_bcast_stations(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int ret = 0; + + for_each_context(priv, ctx) { + ret = iwl4965_update_bcast_station(priv, ctx); + if (ret) + break; + } + + return ret; +} + +/** + * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table + */ +int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid) +{ + unsigned long flags; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + /* Remove "disable" flag, to enable Tx for this TID */ + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; + priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid, u16 ssn) +{ + unsigned long flags; + int sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) + return -ENXIO; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; + priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, 
&priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid) +{ + unsigned long flags; + int sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + lockdep_assert_held(&priv->mutex); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags_msk = 0; + priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; + priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); +} + +void +iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->sta_lock, flags); + priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; + priv->stations[sta_id].sta.sta.modify_mask = + STA_MODIFY_SLEEP_TX_COUNT_MSK; + priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); + priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; + iwl_legacy_send_add_sta(priv, + &priv->stations[sta_id].sta, CMD_ASYNC); + spin_unlock_irqrestore(&priv->sta_lock, flags); + +} diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c new file mode 100644 index 000000000000..5c40502f869a --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c @@ -0,0 +1,1369 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" + +/* + * mac80211 queues, ACs, hardware queues, FIFOs. + * + * Cf. 
http://wireless.kernel.org/en/developers/Documentation/mac80211/queues + * + * Mac80211 uses the following numbers, which we get as from it + * by way of skb_get_queue_mapping(skb): + * + * VO 0 + * VI 1 + * BE 2 + * BK 3 + * + * + * Regular (not A-MPDU) frames are put into hardware queues corresponding + * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their + * own queue per aggregation session (RA/TID combination), such queues are + * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In + * order to map frames to the right queue, we also need an AC->hw queue + * mapping. This is implemented here. + * + * Due to the way hw queues are set up (by the hw specific modules like + * iwl-4965.c), the AC->hw queue mapping is the identity + * mapping. + */ + +static const u8 tid_to_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO +}; + +static inline int iwl4965_get_ac_from_tid(u16 tid) +{ + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return tid_to_ac[tid]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +static inline int +iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid) +{ + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return ctx->ac_to_fifo[tid_to_ac[tid]]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +/* + * handle build REPLY_TX command notification. + */ +static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv, + struct sk_buff *skb, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + struct ieee80211_hdr *hdr, + u8 std_id) +{ + __le16 fc = hdr->frame_control; + __le32 tx_flags = tx_cmd->tx_flags; + + tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + tx_flags |= TX_CMD_FLG_ACK_MSK; + if (ieee80211_is_mgmt(fc)) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (ieee80211_is_probe_resp(fc) && + !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) + tx_flags |= TX_CMD_FLG_TSF_MSK; + } else { + tx_flags &= (~TX_CMD_FLG_ACK_MSK); + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + if (ieee80211_is_back_req(fc)) + tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; + + tx_cmd->sta_id = std_id; + if (ieee80211_has_morefrags(fc)) + tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; + + if (ieee80211_is_data_qos(fc)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + tx_cmd->tid_tspec = qc[0] & 0xf; + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; + } else { + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + } + + iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); + + tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); + if (ieee80211_is_mgmt(fc)) { + if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); + else + tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); + } else { + tx_cmd->timeout.pm_frame_timeout = 0; + } + + tx_cmd->driver_txop = 0; + tx_cmd->tx_flags = tx_flags; + tx_cmd->next_frame_len = 0; +} + +#define RTS_DFAULT_RETRY_LIMIT 60 + +static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv, + struct iwl_tx_cmd *tx_cmd, + struct ieee80211_tx_info *info, + __le16 fc) +{ + u32 rate_flags; + int rate_idx; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 rate_plcp; + + /* Set retry limit on DATA packets and Probe Responses*/ + if (ieee80211_is_probe_resp(fc)) + data_retry_limit = 3; + else + data_retry_limit = IWL4965_DEFAULT_TX_RETRY; + tx_cmd->data_retry_limit = data_retry_limit; + + /* Set retry limit on RTS packets */ + 
rts_retry_limit = RTS_DFAULT_RETRY_LIMIT; + if (data_retry_limit < rts_retry_limit) + rts_retry_limit = data_retry_limit; + tx_cmd->rts_retry_limit = rts_retry_limit; + + /* DATA packets will use the uCode station table for rate/antenna + * selection */ + if (ieee80211_is_data(fc)) { + tx_cmd->initial_rate_index = 0; + tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK; + return; + } + + /** + * If the current TX rate stored in mac80211 has the MCS bit set, it's + * not really a TX rate. Thus, we use the lowest supported rate for + * this band. Also use the lowest supported rate if the stored rate + * index is invalid. + */ + rate_idx = info->control.rates[0].idx; + if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS || + (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY)) + rate_idx = rate_lowest_index(&priv->bands[info->band], + info->control.sta); + /* For 5 GHZ band, remap mac80211 rate indices into driver indices */ + if (info->band == IEEE80211_BAND_5GHZ) + rate_idx += IWL_FIRST_OFDM_RATE; + /* Get PLCP rate for tx_cmd->rate_n_flags */ + rate_plcp = iwlegacy_rates[rate_idx].plcp; + /* Zero out flags for this packet */ + rate_flags = 0; + + /* Set CCK flag as needed */ + if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + + /* Set up antennas */ + priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->hw_params.valid_tx_ant); + + rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant); + + /* Set the rate in the TX cmd */ + tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags); +} + +static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + struct iwl_tx_cmd *tx_cmd, + struct sk_buff *skb_frag, + int sta_id) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + tx_cmd->sec_ctl = TX_CMD_SEC_CCM; + memcpy(tx_cmd->key, keyconf->key, keyconf->keylen); + if (info->flags & IEEE80211_TX_CTL_AMPDU) + tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK; + IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_TKIP: + tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + ieee80211_get_tkip_key(keyconf, skb_frag, + IEEE80211_TKIP_P2_KEY, tx_cmd->key); + IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n"); + break; + + case WLAN_CIPHER_SUITE_WEP104: + tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; + /* fall through */ + case WLAN_CIPHER_SUITE_WEP40: + tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP | + (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT); + + memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); + + IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " + "with key %d\n", keyconf->keyidx); + break; + + default: + IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher); + break; + } +} + +/* + * start REPLY_TX command process + */ +int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = info->control.sta; + struct iwl_station_priv *sta_priv = NULL; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + struct iwl_tx_cmd *tx_cmd; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + int txq_id; + dma_addr_t phys_addr; + dma_addr_t txcmd_phys; + dma_addr_t scratch_phys; + u16 len, firstlen, secondlen; + u16 seq_number = 0; + __le16 fc; 
+ u8 hdr_len; + u8 sta_id; + u8 wait_write_ptr = 0; + u8 tid = 0; + u8 *qc = NULL; + unsigned long flags; + bool is_agg = false; + + if (info->control.vif) + ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif); + + spin_lock_irqsave(&priv->lock, flags); + if (iwl_legacy_is_rfkill(priv)) { + IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); + goto drop_unlock; + } + + fc = hdr->frame_control; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (ieee80211_is_auth(fc)) + IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); + else if (ieee80211_is_assoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); + else if (ieee80211_is_reassoc_req(fc)) + IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); +#endif + + hdr_len = ieee80211_hdrlen(fc); + + /* Find index into station table for destination station */ + sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", + hdr->addr1); + goto drop_unlock; + } + + IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); + + if (sta) + sta_priv = (void *)sta->drv_priv; + + if (sta_priv && sta_priv->asleep && + (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) { + /* + * This sends an asynchronous command to the device, + * but we can rely on it being processed before the + * next frame is processed -- and the next frame to + * this station is the one that will consume this + * counter. + * For now set the counter to just 1 since we do not + * support uAPSD yet. + */ + iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1); + } + + /* + * Send this frame after DTIM -- there's a special queue + * reserved for this for contexts that support AP mode. + */ + if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { + txq_id = ctx->mcast_queue; + /* + * The microcode will clear the more data + * bit in the last frame it transmits. 
+ */ + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } else + txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)]; + + /* irqs already disabled/saved above when locking priv->lock */ + spin_lock(&priv->sta_lock); + + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) { + spin_unlock(&priv->sta_lock); + goto drop_unlock; + } + seq_number = priv->stations[sta_id].tid[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); + seq_number += 0x10; + /* aggregation is on for this <sta,tid> */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { + txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; + is_agg = true; + } + } + + txq = &priv->txq[txq_id]; + q = &txq->q; + + if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) { + spin_unlock(&priv->sta_lock); + goto drop_unlock; + } + + if (ieee80211_is_data_qos(fc)) { + priv->stations[sta_id].tid[tid].tfds_in_queue++; + if (!ieee80211_has_morefrags(fc)) + priv->stations[sta_id].tid[tid].seq_number = seq_number; + } + + spin_unlock(&priv->sta_lock); + + /* Set up driver data for this TFD */ + memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); + txq->txb[q->write_ptr].skb = skb; + txq->txb[q->write_ptr].ctx = ctx; + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_cmd = txq->cmd[q->write_ptr]; + out_meta = &txq->meta[q->write_ptr]; + tx_cmd = &out_cmd->cmd.tx; + memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); + memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd)); + + /* + * Set up the Tx-command (not MAC!) header. + * Store the chosen Tx queue and TFD index within the sequence field; + * after Tx, uCode's Tx response will return this value so driver can + * locate the frame within the tx queue and do post-tx processing. + */ + out_cmd->hdr.cmd = REPLY_TX; + out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(q->write_ptr))); + + /* Copy MAC header from skb into command buffer */ + memcpy(tx_cmd->hdr, hdr, hdr_len); + + + /* Total # bytes to be transmitted */ + len = (u16)skb->len; + tx_cmd->len = cpu_to_le16(len); + + if (info->control.hw_key) + iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id); + + /* TODO need this for burst mode later on */ + iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); + iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); + + iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc); + + iwl_legacy_update_stats(priv, true, fc, len); + /* + * Use the first empty entry in this queue's command buffer array + * to contain the Tx command and MAC header concatenated together + * (payload data will be in another buffer). + * Size of this varies, due to varying MAC header length. + * If end is not dword aligned, we'll have 2 extra bytes at the end + * of the MAC header (device reads on dword boundaries). + * We'll tell device about this padding later. + */ + len = sizeof(struct iwl_tx_cmd) + + sizeof(struct iwl_cmd_header) + hdr_len; + firstlen = (len + 3) & ~3; + + /* Tell NIC about any 2-byte padding after MAC header */ + if (firstlen != len) + tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + + /* Physical address of this Tx command's header (not MAC header!), + * within command buffer array. 
*/ + txcmd_phys = pci_map_single(priv->pci_dev, + &out_cmd->hdr, firstlen, + PCI_DMA_BIDIRECTIONAL); + dma_unmap_addr_set(out_meta, mapping, txcmd_phys); + dma_unmap_len_set(out_meta, len, firstlen); + /* Add buffer containing Tx command and MAC(!) header to TFD's + * first entry */ + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + txcmd_phys, firstlen, 1, 0); + + if (!ieee80211_has_morefrags(hdr->frame_control)) { + txq->need_update = 1; + } else { + wait_write_ptr = 1; + txq->need_update = 0; + } + + /* Set up TFD's 2nd entry to point directly to remainder of skb, + * if any (802.11 null frames have no payload). */ + secondlen = skb->len - hdr_len; + if (secondlen > 0) { + phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, + secondlen, PCI_DMA_TODEVICE); + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, secondlen, + 0, 0); + } + + scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + + offsetof(struct iwl_tx_cmd, scratch); + + /* take back ownership of DMA buffer to enable update */ + pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, + firstlen, PCI_DMA_BIDIRECTIONAL); + tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); + tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys); + + IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", + le16_to_cpu(out_cmd->hdr.sequence)); + IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); + iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); + + /* Set up entry for this TFD in Tx byte-count array */ + if (info->flags & IEEE80211_TX_CTL_AMPDU) + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, + le16_to_cpu(tx_cmd->len)); + + pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, + firstlen, PCI_DMA_BIDIRECTIONAL); + + trace_iwlwifi_legacy_dev_tx(priv, + &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], + sizeof(struct iwl_tfd), + &out_cmd->hdr, firstlen, + skb->data + hdr_len, secondlen); + + /* Tell device the write index *just past* this latest filled TFD */ + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + + /* + * At this point the frame is "transmitted" successfully + * and we will get a TX status notification eventually, + * regardless of the value of ret. "ret" only indicates + * whether or not we should update the write pointer. + */ + + /* + * Avoid atomic ops if it isn't an associated client. + * Also, if this is a packet for aggregation, don't + * increase the counter because the ucode will stop + * aggregation queues when their respective station + * goes to sleep. 
+ */ + if (sta_priv && sta_priv->client && !is_agg) + atomic_inc(&sta_priv->pending_frames); + + if ((iwl_legacy_queue_space(q) < q->high_mark) && + priv->mac80211_registered) { + if (wait_write_ptr) { + spin_lock_irqsave(&priv->lock, flags); + txq->need_update = 1; + iwl_legacy_txq_update_write_ptr(priv, txq); + spin_unlock_irqrestore(&priv->lock, flags); + } else { + iwl_legacy_stop_queue(priv, txq); + } + } + + return 0; + +drop_unlock: + spin_unlock_irqrestore(&priv->lock, flags); + return -1; +} + +static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr, size_t size) +{ + ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma, + GFP_KERNEL); + if (!ptr->addr) + return -ENOMEM; + ptr->size = size; + return 0; +} + +static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv, + struct iwl_dma_ptr *ptr) +{ + if (unlikely(!ptr->addr)) + return; + + dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma); + memset(ptr, 0, sizeof(*ptr)); +} + +/** + * iwl4965_hw_txq_ctx_free - Free TXQ Context + * + * Destroy all TX DMA queues and structures + */ +void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv) +{ + int txq_id; + + /* Tx queues */ + if (priv->txq) { + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (txq_id == priv->cmd_queue) + iwl_legacy_cmd_queue_free(priv); + else + iwl_legacy_tx_queue_free(priv, txq_id); + } + iwl4965_free_dma_ptr(priv, &priv->kw); + + iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls); + + /* free tx queue structure */ + iwl_legacy_txq_mem(priv); +} + +/** + * iwl4965_txq_ctx_alloc - allocate TX queue context + * Allocate all Tx DMA structures and initialize them + * + * @param priv + * @return error code + */ +int iwl4965_txq_ctx_alloc(struct iwl_priv *priv) +{ + int ret; + int txq_id, slots_num; + unsigned long flags; + + /* Free all tx/cmd queues and keep-warm buffer */ + iwl4965_hw_txq_ctx_free(priv); + + ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls, + priv->hw_params.scd_bc_tbls_size); + if (ret) { + IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); + goto error_bc_tbls; + } + /* Alloc keep-warm buffer */ + ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); + if (ret) { + IWL_ERR(priv, "Keep Warm allocation failed\n"); + goto error_kw; + } + + /* allocate tx queue structure */ + ret = iwl_legacy_alloc_txq_mem(priv); + if (ret) + goto error; + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + iwl4965_txq_set_sched(priv, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4/#9) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = (txq_id == priv->cmd_queue) ? 
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + ret = iwl_legacy_tx_queue_init(priv, + &priv->txq[txq_id], slots_num, + txq_id); + if (ret) { + IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); + goto error; + } + } + + return ret; + + error: + iwl4965_hw_txq_ctx_free(priv); + iwl4965_free_dma_ptr(priv, &priv->kw); + error_kw: + iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls); + error_bc_tbls: + return ret; +} + +void iwl4965_txq_ctx_reset(struct iwl_priv *priv) +{ + int txq_id, slots_num; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Turn off all Tx DMA fifos */ + iwl4965_txq_set_sched(priv, 0); + + /* Tell NIC where to find the "keep warm" buffer */ + iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Alloc and init all Tx queues, including the command queue (#4) */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { + slots_num = txq_id == priv->cmd_queue ? + TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; + iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id], + slots_num, txq_id); + } +} + +/** + * iwl4965_txq_ctx_stop - Stop all Tx DMA channels + */ +void iwl4965_txq_ctx_stop(struct iwl_priv *priv) +{ + int ch, txq_id; + unsigned long flags; + + /* Turn off all Tx DMA fifos */ + spin_lock_irqsave(&priv->lock, flags); + + iwl4965_txq_set_sched(priv, 0); + + /* Stop each Tx DMA channel, and wait for it to be idle */ + for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { + iwl_legacy_write_direct32(priv, + FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); + if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), + 1000)) + IWL_ERR(priv, "Failing on timeout while stopping" + " DMA channel %d [0x%08x]", ch, + iwl_legacy_read_direct32(priv, + FH_TSSR_TX_STATUS_REG)); + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (!priv->txq) + return; + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (txq_id == priv->cmd_queue) + iwl_legacy_cmd_queue_unmap(priv); + else + iwl_legacy_tx_queue_unmap(priv, txq_id); +} + +/* + * Find first available (lowest unused) Tx Queue, mark it "active". + * Called only when finding queue for aggregation. + * Should never return anything < 7, because they should already + * be in use as EDCA AC (0-3), Command (4), reserved (5, 6) + */ +static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv) +{ + int txq_id; + + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) + return txq_id; + return -1; +} + +/** + * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration + */ +static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, + u16 txq_id) +{ + /* Simply stop the queue, but don't change any configuration; + * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ + iwl_legacy_write_prph(priv, + IWL49_SCD_QUEUE_STATUS_BITS(txq_id), + (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)| + (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); +} + +/** + * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue + */ +static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, + u16 txq_id) +{ + u32 tbl_dw_addr; + u32 tbl_dw; + u16 scd_q2ratid; + + scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; + + tbl_dw_addr = priv->scd_base_addr + + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); + + tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr); + + if (txq_id & 0x1) + tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); + else + tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); + + iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw); + + return 0; +} + +/** + * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue + * + * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE, + * i.e. it must be one of the higher queues used for aggregation + */ +static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id, + int tx_fifo, int sta_id, int tid, u16 ssn_idx) +{ + unsigned long flags; + u16 ra_tid; + int ret; + + if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || + (IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL49_FIRST_AMPDU_QUEUE, + IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } + + ra_tid = BUILD_RAxTID(sta_id, tid); + + /* Modify device's station table to Tx this TID */ + ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid); + if (ret) + return ret; + + spin_lock_irqsave(&priv->lock, flags); + + /* Stop this Tx queue before configuring it */ + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + /* Map receiver-address / traffic-ID to this queue */ + iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); + + /* Set this queue as a chain-building queue */ + iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + /* Place first TFD at index corresponding to start sequence number. 
+ * Assumes that ssn_idx is valid (!= 0xFFF) */ + priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + /* Set up Tx window size and frame limit for this queue */ + iwl_legacy_write_targ_mem(priv, + priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), + (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), + (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) + & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + + /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + + +int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn) +{ + int sta_id; + int tx_fifo; + int txq_id; + int ret; + unsigned long flags; + struct iwl_tid_data *tid_data; + + tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid); + if (unlikely(tx_fifo < 0)) + return tx_fifo; + + IWL_WARN(priv, "%s on ra = %pM tid = %d\n", + __func__, sta->addr, tid); + + sta_id = iwl_legacy_sta_id(sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Start AGG on invalid station\n"); + return -ENXIO; + } + if (unlikely(tid >= MAX_TID_COUNT)) + return -EINVAL; + + if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { + IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); + return -ENXIO; + } + + txq_id = iwl4965_txq_ctx_activate_free(priv); + if (txq_id == -1) { + IWL_ERR(priv, "No free aggregation queue available\n"); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + *ssn = SEQ_TO_SN(tid_data->seq_number); + tid_data->agg.txq_id = txq_id; + iwl_legacy_set_swq_id(&priv->txq[txq_id], + iwl4965_get_ac_from_tid(tid), txq_id); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo, + sta_id, tid, *ssn); + if (ret) + return ret; + + spin_lock_irqsave(&priv->sta_lock, flags); + tid_data = &priv->stations[sta_id].tid[tid]; + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(priv, "HW queue is empty\n"); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); + } else { + IWL_DEBUG_HT(priv, + "HW queue is NOT empty: %d packets in HW queue\n", + tid_data->tfds_in_queue); + tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; + } + spin_unlock_irqrestore(&priv->sta_lock, flags); + return ret; +} + +/** + * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE + * priv->lock must be held by the caller + */ +static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, + u16 ssn_idx, u8 tx_fifo) +{ + if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || + (IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { + IWL_WARN(priv, + "queue number out of range: %d, must be %d to %d\n", + txq_id, IWL49_FIRST_AMPDU_QUEUE, + IWL49_FIRST_AMPDU_QUEUE + + priv->cfg->base_params->num_of_ampdu_queues - 1); + return -EINVAL; + } + + iwl4965_tx_queue_stop_scheduler(priv, txq_id); + + iwl_legacy_clear_bits_prph(priv, + IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); + + 
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); + priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); + /* supposes that ssn_idx is valid (!= 0xFFF) */ + iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); + + iwl_legacy_clear_bits_prph(priv, + IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); + iwl_txq_ctx_deactivate(priv, txq_id); + iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); + + return 0; +} + +int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid) +{ + int tx_fifo_id, txq_id, sta_id, ssn; + struct iwl_tid_data *tid_data; + int write_ptr, read_ptr; + unsigned long flags; + + tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid); + if (unlikely(tx_fifo_id < 0)) + return tx_fifo_id; + + sta_id = iwl_legacy_sta_id(sta); + + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); + return -ENXIO; + } + + spin_lock_irqsave(&priv->sta_lock, flags); + + tid_data = &priv->stations[sta_id].tid[tid]; + ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; + txq_id = tid_data->agg.txq_id; + + switch (priv->stations[sta_id].tid[tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* + * This can happen if the peer stops aggregation + * again before we've had a chance to drain the + * queue we selected previously, i.e. before the + * session was really started completely. + */ + IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); + goto turn_off; + case IWL_AGG_ON: + break; + default: + IWL_WARN(priv, "Stopping AGG while state not ON or starting\n"); + } + + write_ptr = priv->txq[txq_id].q.write_ptr; + read_ptr = priv->txq[txq_id].q.read_ptr; + + /* The queue is not empty */ + if (write_ptr != read_ptr) { + IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n"); + priv->stations[sta_id].tid[tid].agg.state = + IWL_EMPTYING_HW_QUEUE_DELBA; + spin_unlock_irqrestore(&priv->sta_lock, flags); + return 0; + } + + IWL_DEBUG_HT(priv, "HW queue is empty\n"); + turn_off: + priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF; + + /* do not restore/save irqs */ + spin_unlock(&priv->sta_lock); + spin_lock(&priv->lock); + + /* + * the only reason this call can fail is queue number out of range, + * which can happen if uCode is reloaded and all the station + * information is lost. If it is outside the range, there is no need + * to deactivate the uCode queue; just return "success" to allow + * mac80211 to clean up its own data.
+ */ + iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id); + spin_unlock_irqrestore(&priv->lock, flags); + + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + + return 0; +} + +int iwl4965_txq_check_empty(struct iwl_priv *priv, + int sta_id, u8 tid, int txq_id) +{ + struct iwl_queue *q = &priv->txq[txq_id].q; + u8 *addr = priv->stations[sta_id].sta.sta.addr; + struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid]; + struct iwl_rxon_context *ctx; + + ctx = &priv->contexts[priv->stations[sta_id].ctxid]; + + lockdep_assert_held(&priv->sta_lock); + + switch (priv->stations[sta_id].tid[tid].agg.state) { + case IWL_EMPTYING_HW_QUEUE_DELBA: + /* We are reclaiming the last packet of the */ + /* aggregated HW queue */ + if ((txq_id == tid_data->agg.txq_id) && + (q->read_ptr == q->write_ptr)) { + u16 ssn = SEQ_TO_SN(tid_data->seq_number); + int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid); + IWL_DEBUG_HT(priv, + "HW queue empty: continue DELBA flow\n"); + iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo); + tid_data->agg.state = IWL_AGG_OFF; + ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid); + } + break; + case IWL_EMPTYING_HW_QUEUE_ADDBA: + /* We are reclaiming the last packet of the queue */ + if (tid_data->tfds_in_queue == 0) { + IWL_DEBUG_HT(priv, + "HW queue empty: continue ADDBA flow\n"); + tid_data->agg.state = IWL_AGG_ON; + ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid); + } + break; + } + + return 0; +} + +static void iwl4965_non_agg_tx_status(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr1) +{ + struct ieee80211_sta *sta; + struct iwl_station_priv *sta_priv; + + rcu_read_lock(); + sta = ieee80211_find_sta(ctx->vif, addr1); + if (sta) { + sta_priv = (void *)sta->drv_priv; + /* avoid atomic ops if this isn't a client */ + if (sta_priv->client && + atomic_dec_return(&sta_priv->pending_frames) == 0) + ieee80211_sta_block_awake(priv->hw, sta, false); + } + rcu_read_unlock(); +} + +static void +iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info, + bool is_agg) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data; + + if (!is_agg) + iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1); + + ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); +} + +int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + struct iwl_tx_info *tx_info; + int nfreed = 0; + struct ieee80211_hdr *hdr; + + if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + index, q->n_bd, q->write_ptr, q->read_ptr); + return 0; + } + + for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd); + q->read_ptr != index; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + tx_info = &txq->txb[txq->q.read_ptr]; + iwl4965_tx_status(priv, tx_info, + txq_id >= IWL4965_FIRST_AMPDU_QUEUE); + + hdr = (struct ieee80211_hdr *)tx_info->skb->data; + if (hdr && ieee80211_is_data_qos(hdr->frame_control)) + nfreed++; + tx_info->skb = NULL; + + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + } + return nfreed; +} + +/** + * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack + * + * Go through block-ack's bitmap of ACK'd frames, update driver's record of + * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 
+ */ +static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_ht_agg *agg, + struct iwl_compressed_ba_resp *ba_resp) + +{ + int i, sh, ack; + u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + int successes = 0; + struct ieee80211_tx_info *info; + u64 bitmap, sent_bitmap; + + if (unlikely(!agg->wait_for_ba)) { + if (unlikely(ba_resp->bitmap)) + IWL_ERR(priv, "Received BA when not expected\n"); + return -EINVAL; + } + + /* Mark that the expected block-ack response arrived */ + agg->wait_for_ba = 0; + IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, + ba_resp->seq_ctl); + + /* Calculate shift to align block-ack bits with our Tx window bits */ + sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4); + if (sh < 0) /* tbw something is wrong with indices */ + sh += 0x100; + + if (agg->frame_count > (64 - sh)) { + IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); + return -1; + } + + /* don't use 64-bit values for now */ + bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; + + /* check for success or failure according to the + * transmitted bitmap and block-ack bitmap */ + sent_bitmap = bitmap & agg->bitmap; + + /* For each frame attempted in aggregation, + * update driver's record of tx frame's status. */ + i = 0; + while (sent_bitmap) { + ack = sent_bitmap & 1ULL; + successes += ack; + IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n", + ack ? "ACK" : "NACK", i, + (agg->start_idx + i) & 0xff, + agg->start_idx + i); + sent_bitmap >>= 1; + ++i; + } + + IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", + (unsigned long long)bitmap); + + info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); + memset(&info->status, 0, sizeof(info->status)); + info->flags |= IEEE80211_TX_STAT_ACK; + info->flags |= IEEE80211_TX_STAT_AMPDU; + info->status.ampdu_ack_len = successes; + info->status.ampdu_len = agg->frame_count; + iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info); + + return 0; +} + +/** + * translate ucode response to mac80211 tx status control values + */ +void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + struct ieee80211_tx_rate *r = &info->control.rates[0]; + + info->antenna_sel_tx = + ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); + if (rate_n_flags & RATE_MCS_HT_MSK) + r->flags |= IEEE80211_TX_RC_MCS; + if (rate_n_flags & RATE_MCS_GF_MSK) + r->flags |= IEEE80211_TX_RC_GREEN_FIELD; + if (rate_n_flags & RATE_MCS_HT40_MSK) + r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + if (rate_n_flags & RATE_MCS_DUP_MSK) + r->flags |= IEEE80211_TX_RC_DUP_DATA; + if (rate_n_flags & RATE_MCS_SGI_MSK) + r->flags |= IEEE80211_TX_RC_SHORT_GI; + r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band); +} + +/** + * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA + * + * Handles block-acknowledge notification from device, which reports success + * of frames sent via aggregation. 
 + */ +void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; + struct iwl_tx_queue *txq = NULL; + struct iwl_ht_agg *agg; + int index; + int sta_id; + int tid; + unsigned long flags; + + /* "flow" corresponds to Tx queue */ + u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); + + /* "ssn" is start of block-ack Tx window, corresponds to index + * (in Tx queue's circular buffer) of first TFD/frame in window */ + u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); + + if (scd_flow >= priv->hw_params.max_txq_num) { + IWL_ERR(priv, + "BUG_ON scd_flow is bigger than number of queues\n"); + return; + } + + txq = &priv->txq[scd_flow]; + sta_id = ba_resp->sta_id; + tid = ba_resp->tid; + agg = &priv->stations[sta_id].tid[tid].agg; + if (unlikely(agg->txq_id != scd_flow)) { + /* + * FIXME: this is a uCode bug which needs to be addressed; + * log the information and return for now! + * Since it can happen very often, and in order + * not to fill the syslog, don't enable the logging by default. + */ + IWL_DEBUG_TX_REPLY(priv, + "BA scd_flow %d does not match txq_id %d\n", + scd_flow, agg->txq_id); + return; + } + + /* Find index just before block-ack window */ + index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); + + spin_lock_irqsave(&priv->sta_lock, flags); + + IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " + "sta_id = %d\n", + agg->wait_for_ba, + (u8 *) &ba_resp->sta_addr_lo32, + ba_resp->sta_id); + IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx," + "scd_flow = " + "%d, scd_ssn = %d\n", + ba_resp->tid, + ba_resp->seq_ctl, + (unsigned long long)le64_to_cpu(ba_resp->bitmap), + ba_resp->scd_flow, + ba_resp->scd_ssn); + IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n", + agg->start_idx, + (unsigned long long)agg->bitmap); + + /* Update driver's record of ACK vs. not for each frame in window */ + iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); + + /* Release all TFDs before the SSN, i.e. all TFDs in front of + * block-ack window (we assume that they've been successfully + * transmitted ... if not, it's too late anyway).
*/ + if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { + /* calculate mac80211 ampdu sw queue to wake */ + int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index); + iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); + + if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) && + priv->mac80211_registered && + (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) + iwl_legacy_wake_queue(priv, txq); + + iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow); + } + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +const char *iwl4965_get_tx_fail_reason(u32 status) +{ +#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x +#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x + + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + return "SUCCESS"; + TX_STATUS_POSTPONE(DELAY); + TX_STATUS_POSTPONE(FEW_BYTES); + TX_STATUS_POSTPONE(QUIET_PERIOD); + TX_STATUS_POSTPONE(CALC_TTAK); + TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY); + TX_STATUS_FAIL(SHORT_LIMIT); + TX_STATUS_FAIL(LONG_LIMIT); + TX_STATUS_FAIL(FIFO_UNDERRUN); + TX_STATUS_FAIL(DRAIN_FLOW); + TX_STATUS_FAIL(RFKILL_FLUSH); + TX_STATUS_FAIL(LIFE_EXPIRE); + TX_STATUS_FAIL(DEST_PS); + TX_STATUS_FAIL(HOST_ABORTED); + TX_STATUS_FAIL(BT_RETRY); + TX_STATUS_FAIL(STA_INVALID); + TX_STATUS_FAIL(FRAG_DROPPED); + TX_STATUS_FAIL(TID_DISABLE); + TX_STATUS_FAIL(FIFO_FLUSHED); + TX_STATUS_FAIL(INSUFFICIENT_CF_POLL); + TX_STATUS_FAIL(PASSIVE_NO_RX); + TX_STATUS_FAIL(NO_BEACON_ON_RADAR); + } + + return "UNKNOWN"; + +#undef TX_STATUS_FAIL +#undef TX_STATUS_POSTPONE +} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c new file mode 100644 index 000000000000..001d148feb94 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c @@ -0,0 +1,166 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sched.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-4965-hw.h" +#include "iwl-4965.h" +#include "iwl-4965-calib.h" + +#define IWL_AC_UNSET -1 + +/** + * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, + * using sample data 100 bytes apart. 
If these sample points are good, + * it's a pretty good bet that everything between them is good, too. + */ +static int +iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) +{ + u32 val; + int ret = 0; + u32 errcnt = 0; + u32 i; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + i + IWL4965_RTC_INST_LOWER_BOUND); + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + ret = -EIO; + errcnt++; + if (errcnt >= 3) + break; + } + } + + return ret; +} + +/** + * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host, + * looking at all data. + */ +static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image, + u32 len) +{ + u32 val; + u32 save_len = len; + int ret = 0; + u32 errcnt; + + IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); + + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, + IWL4965_RTC_INST_LOWER_BOUND); + + errcnt = 0; + for (; len > 0; len -= sizeof(u32), image++) { + /* read data comes through single port, auto-incr addr */ + /* NOTE: Use the debugless read so we don't flood kernel log + * if IWL_DL_IO is set */ + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (val != le32_to_cpu(*image)) { + IWL_ERR(priv, "uCode INST section is invalid at " + "offset 0x%x, is 0x%x, s/b 0x%x\n", + save_len - len, val, le32_to_cpu(*image)); + ret = -EIO; + errcnt++; + if (errcnt >= 20) + break; + } + } + + if (!errcnt) + IWL_DEBUG_INFO(priv, + "ucode image in INSTRUCTION memory is good\n"); + + return ret; +} + +/** + * iwl4965_verify_ucode - determine which instruction image is in SRAM, + * and verify its contents + */ +int iwl4965_verify_ucode(struct iwl_priv *priv) +{ + __le32 *image; + u32 len; + int ret; + + /* Try bootstrap */ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); + return 0; + } + + /* Try initialize */ + image = (__le32 *)priv->ucode_init.v_addr; + len = priv->ucode_init.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); + return 0; + } + + /* Try runtime/protocol */ + image = (__le32 *)priv->ucode_code.v_addr; + len = priv->ucode_code.len; + ret = iwl4965_verify_inst_sparse(priv, image, len); + if (!ret) { + IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); + return 0; + } + + IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); + + /* Since nothing seems to match, show first several data entries in + * instruction SRAM, so maybe visual inspection will give a clue. + * Selection of bootstrap image (vs. other images) is arbitrary. 
*/ + image = (__le32 *)priv->ucode_boot.v_addr; + len = priv->ucode_boot.len; + ret = iwl4965_verify_inst_full(priv, image, len); + + return ret; +} diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c index 91a9f5253469..f5433c74b845 100644 --- a/drivers/net/wireless/iwlwifi/iwl-4965.c +++ b/drivers/net/wireless/iwlegacy/iwl-4965.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -43,12 +43,11 @@ #include "iwl-core.h" #include "iwl-io.h" #include "iwl-helpers.h" -#include "iwl-agn-calib.h" +#include "iwl-4965-calib.h" #include "iwl-sta.h" -#include "iwl-agn-led.h" -#include "iwl-agn.h" -#include "iwl-agn-debugfs.h" -#include "iwl-legacy.h" +#include "iwl-4965-led.h" +#include "iwl-4965.h" +#include "iwl-4965-debugfs.h" static int iwl4965_send_tx_power(struct iwl_priv *priv); static int iwl4965_hw_get_temperature(struct iwl_priv *priv); @@ -74,11 +73,11 @@ static int iwl4965_verify_bsm(struct iwl_priv *priv) IWL_DEBUG_INFO(priv, "Begin verify bsm\n"); /* verify BSM SRAM contents */ - val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG); + val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG); for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len; reg += sizeof(u32), image++) { - val = iwl_read_prph(priv, reg); + val = iwl_legacy_read_prph(priv, reg); if (val != le32_to_cpu(*image)) { IWL_ERR(priv, "BSM uCode verification failed at " "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", @@ -158,33 +157,34 @@ static int iwl4965_load_bsm(struct iwl_priv *priv) inst_len = priv->ucode_init.len; data_len = priv->ucode_init_data.len; - iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); - iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); - iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); - iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); /* Fill BSM memory with bootstrap instructions */ for (reg_offset = BSM_SRAM_LOWER_BOUND; reg_offset < BSM_SRAM_LOWER_BOUND + len; reg_offset += sizeof(u32), image++) - _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image)); + _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image)); ret = iwl4965_verify_bsm(priv); if (ret) return ret; /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ - iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); - iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); - iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); + iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0); + iwl_legacy_write_prph(priv, + BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND); + iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); /* Load bootstrap code into instruction SRAM now, * to prepare to load "initialize" uCode */ - iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); + iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START); /* Wait for load of bootstrap uCode to finish */ for (i = 0; i < 100; i++) { - done = 
iwl_read_prph(priv, BSM_WR_CTRL_REG); + done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG); if (!(done & BSM_WR_CTRL_REG_BIT_START)) break; udelay(10); @@ -198,7 +198,8 @@ static int iwl4965_load_bsm(struct iwl_priv *priv) /* Enable future boot loads whenever power management unit triggers it * (e.g. when powering back up after power-save shutdown) */ - iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); + iwl_legacy_write_prph(priv, + BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN); return 0; @@ -224,14 +225,14 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv) pdata = priv->ucode_data_backup.p_addr >> 4; /* Tell bootstrap uCode where to find image to load */ - iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); - iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); - iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, priv->ucode_data.len); /* Inst byte count must be last to set up, bit 31 signals uCode * that all new ptr/size info is in place */ - iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, priv->ucode_code.len | BSM_DRAM_INST_LOAD); IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); @@ -251,18 +252,10 @@ static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv) */ static void iwl4965_init_alive_start(struct iwl_priv *priv) { - /* Check alive response for "valid" sign from uCode */ - if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); - goto restart; - } - /* Bootstrap uCode has loaded initialize uCode ... verify inst image. * This is a paranoid check, because we would not have gotten the * "initialize" alive if code weren't properly loaded. 
*/ - if (iwl_verify_ucode(priv)) { + if (iwl4965_verify_ucode(priv)) { /* Runtime instruction load was bad; * take it all the way back down so we can try again */ IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); @@ -288,7 +281,7 @@ restart: queue_work(priv->workqueue, &priv->restart); } -static bool is_ht40_channel(__le32 rxon_flags) +static bool iw4965_is_ht40_channel(__le32 rxon_flags) { int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >> RXON_FLG_CHANNEL_MODE_POS; @@ -296,23 +289,6 @@ static bool is_ht40_channel(__le32 rxon_flags) (chan_mod == CHANNEL_MODE_MIXED)); } -/* - * EEPROM handlers - */ -static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv) -{ - return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET); -} - -/* - * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask - * must be called under priv->lock and mac access - */ -static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask) -{ - iwl_write_prph(priv, IWL49_SCD_TXFACT, mask); -} - static void iwl4965_nic_config(struct iwl_priv *priv) { unsigned long flags; @@ -320,22 +296,23 @@ static void iwl4965_nic_config(struct iwl_priv *priv) spin_lock_irqsave(&priv->lock, flags); - radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG); /* write radio config values to register */ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX) - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | EEPROM_RF_CFG_STEP_MSK(radio_cfg) | EEPROM_RF_CFG_DASH_MSK(radio_cfg)); /* set CSR_HW_CONFIG_REG for uCode use */ - iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); priv->calib_info = (struct iwl_eeprom_calib_info *) - iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET); + iwl_legacy_eeprom_query_addr(priv, + EEPROM_4965_CALIB_TXPOWER_OFFSET); spin_unlock_irqrestore(&priv->lock, flags); } @@ -348,7 +325,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv) struct iwl_chain_noise_data *data = &(priv->chain_noise_data); if ((data->state == IWL_CHAIN_NOISE_ALIVE) && - iwl_is_any_associated(priv)) { + iwl_legacy_is_any_associated(priv)) { struct iwl_calib_diff_gain_cmd cmd; /* clear data for chain noise calibration algorithm */ @@ -365,7 +342,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv) cmd.diff_gain_a = 0; cmd.diff_gain_b = 0; cmd.diff_gain_c = 0; - if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, + if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, sizeof(cmd), &cmd)) IWL_ERR(priv, "Could not send REPLY_PHY_CALIBRATION_CMD\n"); @@ -374,237 +351,6 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv) } } -static void iwl4965_gain_computation(struct iwl_priv *priv, - u32 *average_noise, - u16 min_average_noise_antenna_i, - u32 min_average_noise, - u8 default_chain) -{ - int i, ret; - struct iwl_chain_noise_data *data = &priv->chain_noise_data; - - data->delta_gain_code[min_average_noise_antenna_i] = 0; - - for (i = default_chain; i < NUM_RX_CHAINS; i++) { - s32 delta_g = 0; - - if (!(data->disconn_array[i]) && - (data->delta_gain_code[i] == - CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) { - delta_g = average_noise[i] - min_average_noise; - data->delta_gain_code[i] = (u8)((delta_g * 10) / 15); - data->delta_gain_code[i] = - min(data->delta_gain_code[i], - (u8) 
CHAIN_NOISE_MAX_DELTA_GAIN_CODE); - - data->delta_gain_code[i] = - (data->delta_gain_code[i] | (1 << 2)); - } else { - data->delta_gain_code[i] = 0; - } - } - IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n", - data->delta_gain_code[0], - data->delta_gain_code[1], - data->delta_gain_code[2]); - - /* Differential gain gets sent to uCode only once */ - if (!data->radio_write) { - struct iwl_calib_diff_gain_cmd cmd; - data->radio_write = 1; - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD; - cmd.diff_gain_a = data->delta_gain_code[0]; - cmd.diff_gain_b = data->delta_gain_code[1]; - cmd.diff_gain_c = data->delta_gain_code[2]; - ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, - sizeof(cmd), &cmd); - if (ret) - IWL_DEBUG_CALIB(priv, "fail sending cmd " - "REPLY_PHY_CALIBRATION_CMD\n"); - - /* TODO we might want recalculate - * rx_chain in rxon cmd */ - - /* Mark so we run this algo only once! */ - data->state = IWL_CHAIN_NOISE_CALIBRATED; - } -} - -static void iwl4965_bg_txpower_work(struct work_struct *work) -{ - struct iwl_priv *priv = container_of(work, struct iwl_priv, - txpower_work); - - /* If a scan happened to start before we got here - * then just return; the statistics notification will - * kick off another scheduled work to compensate for - * any temperature delta we missed here. */ - if (test_bit(STATUS_EXIT_PENDING, &priv->status) || - test_bit(STATUS_SCANNING, &priv->status)) - return; - - mutex_lock(&priv->mutex); - - /* Regardless of if we are associated, we must reconfigure the - * TX power since frames can be sent on non-radar channels while - * not associated */ - iwl4965_send_tx_power(priv); - - /* Update last_temperature to keep is_calib_needed from running - * when it isn't needed... */ - priv->last_temperature = priv->temperature; - - mutex_unlock(&priv->mutex); -} - -/* - * Acquire priv->lock before calling this function ! - */ -static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) -{ - iwl_write_direct32(priv, HBUS_TARG_WRPTR, - (index & 0xff) | (txq_id << 8)); - iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index); -} - -/** - * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue - * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed - * @scd_retry: (1) Indicates queue will be used in aggregation mode - * - * NOTE: Acquire priv->lock before calling this function ! - */ -static void iwl4965_tx_queue_set_status(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - int tx_fifo_id, int scd_retry) -{ - int txq_id = txq->q.id; - - /* Find out whether to activate Tx queue */ - int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; - - /* Set up and activate */ - iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), - (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) | - (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) | - (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | - IWL49_SCD_QUEUE_STTS_REG_MSK); - - txq->sched_retry = scd_retry; - - IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n", - active ? "Activate" : "Deactivate", - scd_retry ? 
"BA" : "AC", txq_id, tx_fifo_id); -} - -static const s8 default_queue_to_tx_fifo[] = { - IWL_TX_FIFO_VO, - IWL_TX_FIFO_VI, - IWL_TX_FIFO_BE, - IWL_TX_FIFO_BK, - IWL49_CMD_FIFO_NUM, - IWL_TX_FIFO_UNUSED, - IWL_TX_FIFO_UNUSED, -}; - -static int iwl4965_alive_notify(struct iwl_priv *priv) -{ - u32 a; - unsigned long flags; - int i, chan; - u32 reg_val; - - spin_lock_irqsave(&priv->lock, flags); - - /* Clear 4965's internal Tx Scheduler data base */ - priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR); - a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET; - for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) - iwl_write_targ_mem(priv, a, 0); - for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) - iwl_write_targ_mem(priv, a, 0); - for (; a < priv->scd_base_addr + - IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) - iwl_write_targ_mem(priv, a, 0); - - /* Tel 4965 where to find Tx byte count tables */ - iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, - priv->scd_bc_tbls.dma >> 10); - - /* Enable DMA channel */ - for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++) - iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); - - /* Update FH chicken bits */ - reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); - iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, - reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); - - /* Disable chain mode for all queues */ - iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); - - /* Initialize each Tx queue (including the command queue) */ - for (i = 0; i < priv->hw_params.max_txq_num; i++) { - - /* TFD circular buffer read/write indexes */ - iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0); - iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); - - /* Max Tx Window size for Scheduler-ACK mode */ - iwl_write_targ_mem(priv, priv->scd_base_addr + - IWL49_SCD_CONTEXT_QUEUE_OFFSET(i), - (SCD_WIN_SIZE << - IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & - IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); - - /* Frame limit */ - iwl_write_targ_mem(priv, priv->scd_base_addr + - IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) + - sizeof(u32), - (SCD_FRAME_LIMIT << - IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & - IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); - - } - iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, - (1 << priv->hw_params.max_txq_num) - 1); - - /* Activate all Tx DMA/FIFO channels */ - priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6)); - - iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0); - - /* make sure all queue are not stopped */ - memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); - for (i = 0; i < 4; i++) - atomic_set(&priv->queue_stop_count[i], 0); - - /* reset to 0 to enable all the queue first */ - priv->txq_ctx_active_msk = 0; - /* Map each Tx/cmd queue to its corresponding fifo */ - BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); - - for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { - int ac = default_queue_to_tx_fifo[i]; - - iwl_txq_ctx_activate(priv, i); - - if (ac == IWL_TX_FIFO_UNUSED) - continue; - - iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); - } - - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; -} - static struct iwl_sensitivity_ranges iwl4965_sensitivity = { .min_nrg_cck = 97, .max_nrg_cck = 0, /* not used, set to 0 */ @@ -666,15 +412,15 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 
priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; - priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); - priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); + priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant); + priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant); priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; iwl4965_set_ct_threshold(priv); priv->hw_params.sens = &iwl4965_sensitivity; - priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; + priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS; return 0; } @@ -1158,9 +904,9 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel, IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band, is_ht40); - ch_info = iwl_get_channel_info(priv, priv->band, channel); + ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel); - if (!is_channel_valid(ch_info)) + if (!iwl_legacy_is_channel_valid(ch_info)) return -EINVAL; /* get txatten group, used to select 1) thermal txpower adjustment @@ -1384,7 +1130,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv) band = priv->band == IEEE80211_BAND_2GHZ; - is_ht40 = is_ht40_channel(ctx->active.flags); + is_ht40 = iw4965_is_ht40_channel(ctx->active.flags); if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) ctrl_chan_high = 1; @@ -1398,7 +1144,8 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv) if (ret) goto out; - ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); + ret = iwl_legacy_send_cmd_pdu(priv, + REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); out: return ret; @@ -1409,8 +1156,8 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv, { int ret = 0; struct iwl4965_rxon_assoc_cmd rxon_assoc; - const struct iwl_rxon_cmd *rxon1 = &ctx->staging; - const struct iwl_rxon_cmd *rxon2 = &ctx->active; + const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging; + const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active; if ((rxon1->flags == rxon2->flags) && (rxon1->filter_flags == rxon2->filter_flags) && @@ -1436,7 +1183,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv, ctx->staging.ofdm_ht_dual_stream_basic_rates; rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; - ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, + ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC, sizeof(rxon_assoc), &rxon_assoc, NULL); if (ret) return ret; @@ -1447,12 +1194,12 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv, static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { /* cast away the const for active_rxon in this function */ - struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active; + struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active; int ret; bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); - if (!iwl_is_alive(priv)) + if (!iwl_legacy_is_alive(priv)) return -EBUSY; if (!ctx->is_active) @@ -1461,7 +1208,7 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c /* always get timestamp with Rx frame */ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; - ret = iwl_check_rxon_cmd(priv, ctx); + ret = iwl_legacy_check_rxon_cmd(priv, ctx); if (ret) { IWL_ERR(priv, "Invalid RXON configuration. 
Not committing.\n"); return -EINVAL; @@ -1475,21 +1222,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c (priv->switch_rxon.channel != ctx->staging.channel)) { IWL_DEBUG_11H(priv, "abort channel switch on %d\n", le16_to_cpu(priv->switch_rxon.channel)); - iwl_chswitch_done(priv, false); + iwl_legacy_chswitch_done(priv, false); } /* If we don't need to send a full RXON, we can use * iwl_rxon_assoc_cmd which is used to reconfigure filter * and other flags for the current radio configuration. */ - if (!iwl_full_rxon_required(priv, ctx)) { - ret = iwl_send_rxon_assoc(priv, ctx); + if (!iwl_legacy_full_rxon_required(priv, ctx)) { + ret = iwl_legacy_send_rxon_assoc(priv, ctx); if (ret) { IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret); return ret; } memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); - iwl_print_rx_config_cmd(priv, ctx); + iwl_legacy_print_rx_config_cmd(priv, ctx); return 0; } @@ -1497,12 +1244,12 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c * an RXON_ASSOC and the new config wants the associated mask enabled, * we must clear the associated from the active configuration * before we apply the new config */ - if (iwl_is_associated_ctx(ctx) && new_assoc) { + if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) { IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n"); active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; - ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, - sizeof(struct iwl_rxon_cmd), + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), active_rxon); /* If the mask clearing failed then we set @@ -1512,9 +1259,9 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); return ret; } - iwl_clear_ucode_stations(priv, ctx); - iwl_restore_stations(priv, ctx); - ret = iwl_restore_default_wep_keys(priv, ctx); + iwl_legacy_clear_ucode_stations(priv, ctx); + iwl_legacy_restore_stations(priv, ctx); + ret = iwl4965_restore_default_wep_keys(priv, ctx); if (ret) { IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); return ret; @@ -1529,24 +1276,25 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr); - iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto); + iwl_legacy_set_rxon_hwcrypto(priv, ctx, + !priv->cfg->mod_params->sw_crypto); /* Apply the new configuration * RXON unassoc clears the station table in uCode so restoration of * stations is needed after it (the RXON command) completes */ if (!new_assoc) { - ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, - sizeof(struct iwl_rxon_cmd), &ctx->staging); + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging); if (ret) { IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); return ret; } IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n"); memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); - iwl_clear_ucode_stations(priv, ctx); - iwl_restore_stations(priv, ctx); - ret = iwl_restore_default_wep_keys(priv, ctx); + iwl_legacy_clear_ucode_stations(priv, ctx); + iwl_legacy_restore_stations(priv, ctx); + ret = iwl4965_restore_default_wep_keys(priv, ctx); if (ret) { IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret); return ret; @@ -1557,21 +1305,21 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c /* Apply the new configuration 
* RXON assoc doesn't clear the station table in uCode, */ - ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, - sizeof(struct iwl_rxon_cmd), &ctx->staging); + ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd, + sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging); if (ret) { IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); return ret; } memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon)); } - iwl_print_rx_config_cmd(priv, ctx); + iwl_legacy_print_rx_config_cmd(priv, ctx); - iwl_init_sensitivity(priv); + iwl4965_init_sensitivity(priv); /* If we issue a new RXON command which required a tune then we must * send a new TXPOWER command or we won't be able to Tx any frames */ - ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); + ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true); if (ret) { IWL_ERR(priv, "Error sending TX power (%d)\n", ret); return ret; @@ -1598,7 +1346,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, struct ieee80211_vif *vif = ctx->vif; band = priv->band == IEEE80211_BAND_2GHZ; - is_ht40 = is_ht40_channel(ctx->staging.flags); + is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags); if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) @@ -1629,19 +1377,19 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, else { switch_time_in_usec = vif->bss_conf.beacon_int * switch_count * TIME_UNIT; - ucode_switch_time = iwl_usecs_to_beacons(priv, + ucode_switch_time = iwl_legacy_usecs_to_beacons(priv, switch_time_in_usec, beacon_interval); - cmd.switch_time = iwl_add_beacon_time(priv, + cmd.switch_time = iwl_legacy_add_beacon_time(priv, priv->ucode_beacon_time, ucode_switch_time, beacon_interval); } IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", cmd.switch_time); - ch_info = iwl_get_channel_info(priv, priv->band, ch); + ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch); if (ch_info) - cmd.expect_beacon = is_channel_radar(ch_info); + cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info); else { IWL_ERR(priv, "invalid channel switch from %u to %u\n", ctx->active.channel, ch); @@ -1658,7 +1406,8 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, priv->switch_rxon.channel = cmd.channel; priv->switch_rxon.switch_in_progress = true; - return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); + return iwl_legacy_send_cmd_pdu(priv, + REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); } /** @@ -1700,7 +1449,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv) u32 R4; if (test_bit(STATUS_TEMPERATURE, &priv->status) && - (priv->_agn.statistics.flag & + (priv->_4965.statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)) { IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n"); R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]); @@ -1725,7 +1474,7 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv) if (!test_bit(STATUS_TEMPERATURE, &priv->status)) vt = sign_extend32(R4, 23); else - vt = sign_extend32(le32_to_cpu(priv->_agn.statistics. + vt = sign_extend32(le32_to_cpu(priv->_4965.statistics. 
general.common.temperature), 23); IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); @@ -1810,7 +1559,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv) } priv->temperature = temp; - iwl_tt_handler(priv); set_bit(STATUS_TEMPERATURE, &priv->status); if (!priv->disable_tx_power_cal && @@ -1819,152 +1567,6 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv) queue_work(priv->workqueue, &priv->txpower_work); } -/** - * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration - */ -static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv, - u16 txq_id) -{ - /* Simply stop the queue, but don't change any configuration; - * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ - iwl_write_prph(priv, - IWL49_SCD_QUEUE_STATUS_BITS(txq_id), - (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)| - (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); -} - -/** - * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE - * priv->lock must be held by the caller - */ -static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, - u16 ssn_idx, u8 tx_fifo) -{ - if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || - (IWL49_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { - IWL_WARN(priv, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWL49_FIRST_AMPDU_QUEUE, - IWL49_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues - 1); - return -EINVAL; - } - - iwl4965_tx_queue_stop_scheduler(priv, txq_id); - - iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); - - priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - /* supposes that ssn_idx is valid (!= 0xFFF) */ - iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); - - iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); - iwl_txq_ctx_deactivate(priv, txq_id); - iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0); - - return 0; -} - -/** - * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue - */ -static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, - u16 txq_id) -{ - u32 tbl_dw_addr; - u32 tbl_dw; - u16 scd_q2ratid; - - scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK; - - tbl_dw_addr = priv->scd_base_addr + - IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); - - tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); - - if (txq_id & 0x1) - tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); - else - tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); - - iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); - - return 0; -} - - -/** - * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue - * - * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE, - * i.e. 
it must be one of the higher queues used for aggregation - */ -static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id, - int tx_fifo, int sta_id, int tid, u16 ssn_idx) -{ - unsigned long flags; - u16 ra_tid; - int ret; - - if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || - (IWL49_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { - IWL_WARN(priv, - "queue number out of range: %d, must be %d to %d\n", - txq_id, IWL49_FIRST_AMPDU_QUEUE, - IWL49_FIRST_AMPDU_QUEUE + - priv->cfg->base_params->num_of_ampdu_queues - 1); - return -EINVAL; - } - - ra_tid = BUILD_RAxTID(sta_id, tid); - - /* Modify device's station table to Tx this TID */ - ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); - if (ret) - return ret; - - spin_lock_irqsave(&priv->lock, flags); - - /* Stop this Tx queue before configuring it */ - iwl4965_tx_queue_stop_scheduler(priv, txq_id); - - /* Map receiver-address / traffic-ID to this queue */ - iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); - - /* Set this queue as a chain-building queue */ - iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id)); - - /* Place first TFD at index corresponding to start sequence number. - * Assumes that ssn_idx is valid (!= 0xFFF) */ - priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); - priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); - iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); - - /* Set up Tx window size and frame limit for this queue */ - iwl_write_targ_mem(priv, - priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), - (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & - IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); - - iwl_write_targ_mem(priv, priv->scd_base_addr + - IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), - (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) - & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); - - iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id)); - - /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); - - spin_unlock_irqrestore(&priv->lock, flags); - - return 0; -} - - static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) { switch (cmd_id) { @@ -1975,7 +1577,8 @@ static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len) } } -static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data) +static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data) { struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data; addsta->mode = cmd->mode; @@ -2028,16 +1631,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, status = le16_to_cpu(frame_status[0].status); idx = start_idx; - /* FIXME: code repetition */ IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n", agg->frame_count, agg->start_idx, idx); info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb); info->status.rates[0].count = tx_resp->failure_frame + 1; info->flags &= ~IEEE80211_TX_CTL_AMPDU; - info->flags |= iwl_tx_status_to_mac80211(status); - iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info); - /* FIXME: code repetition end */ + info->flags |= iwl4965_tx_status_to_mac80211(status); + iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info); IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", status & 0xff, tx_resp->failure_frame); @@ -2064,7 +1665,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", agg->frame_count, txq_id, idx); - hdr = 
iwl_tx_queue_get_hdr(priv, txq_id, idx); + hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx); if (!hdr) { IWL_ERR(priv, "BUG_ON idx doesn't point to valid skb" @@ -2115,15 +1716,14 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, return 0; } -static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr) +static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr) { int i; int start = 0; int ret = IWL_INVALID_STATION; unsigned long flags; - if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) || - (priv->iw_mode == NL80211_IFTYPE_AP)) + if ((priv->iw_mode == NL80211_IFTYPE_ADHOC)) start = IWL_STA_ID; if (is_broadcast_ether_addr(addr)) @@ -2159,13 +1759,13 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr) return ret; } -static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) +static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) { if (priv->iw_mode == NL80211_IFTYPE_STATION) { return IWL_AP_ID; } else { u8 *da = ieee80211_get_DA(hdr); - return iwl_find_station(priv, da); + return iwl4965_find_station(priv, da); } } @@ -2190,7 +1790,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, u8 *qc = NULL; unsigned long flags; - if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) { + if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) { IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d " "is out of range [0-%d] %d %d\n", txq_id, index, txq->q.n_bd, txq->q.write_ptr, @@ -2202,13 +1802,13 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); memset(&info->status, 0, sizeof(info->status)); - hdr = iwl_tx_queue_get_hdr(priv, txq_id, index); + hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index); if (ieee80211_is_data_qos(hdr->frame_control)) { qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & 0xf; } - sta_id = iwl_get_ra_sta_id(priv, hdr); + sta_id = iwl4965_get_ra_sta_id(priv, hdr); if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) { IWL_ERR(priv, "Station not known\n"); return; @@ -2225,114 +1825,89 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index); /* check if BAR is needed */ - if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status)) + if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status)) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; if (txq->q.read_ptr != (scd_ssn & 0xff)) { - index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); + index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff, + txq->q.n_bd); IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " "%d index %d\n", scd_ssn , index); - freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); + freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); if (qc) - iwl_free_tfds_in_queue(priv, sta_id, + iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); if (priv->mac80211_registered && - (iwl_queue_space(&txq->q) > txq->q.low_mark) && - (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) - iwl_wake_queue(priv, txq); + (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) + && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) + iwl_legacy_wake_queue(priv, txq); } } else { info->status.rates[0].count = tx_resp->failure_frame + 1; - info->flags |= iwl_tx_status_to_mac80211(status); - iwlagn_hwrate_to_tx_control(priv, + info->flags |= iwl4965_tx_status_to_mac80211(status); + iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags), info); 
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " "rate_n_flags 0x%x retries %d\n", txq_id, - iwl_get_tx_fail_reason(status), status, + iwl4965_get_tx_fail_reason(status), status, le32_to_cpu(tx_resp->rate_n_flags), tx_resp->failure_frame); - freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); + freed = iwl4965_tx_queue_reclaim(priv, txq_id, index); if (qc && likely(sta_id != IWL_INVALID_STATION)) - iwl_free_tfds_in_queue(priv, sta_id, tid, freed); + iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed); else if (sta_id == IWL_INVALID_STATION) IWL_DEBUG_TX_REPLY(priv, "Station not known\n"); if (priv->mac80211_registered && - (iwl_queue_space(&txq->q) > txq->q.low_mark)) - iwl_wake_queue(priv, txq); + (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)) + iwl_legacy_wake_queue(priv, txq); } if (qc && likely(sta_id != IWL_INVALID_STATION)) - iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); + iwl4965_txq_check_empty(priv, sta_id, tid, txq_id); - iwl_check_abort_status(priv, tx_resp->frame_count, status); + iwl4965_check_abort_status(priv, tx_resp->frame_count, status); spin_unlock_irqrestore(&priv->sta_lock, flags); } -static int iwl4965_calc_rssi(struct iwl_priv *priv, - struct iwl_rx_phy_res *rx_resp) +static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) { - /* data from PHY/DSP regarding signal strength, etc., - * contents are always there, not configurable by host. */ - struct iwl4965_rx_non_cfg_phy *ncphy = - (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf; - u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK) - >> IWL49_AGC_DB_POS; - - u32 valid_antennae = - (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK) - >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET; - u8 max_rssi = 0; - u32 i; - - /* Find max rssi among 3 possible receivers. - * These values are measured by the digital signal processor (DSP). - * They should stay fairly constant even as the signal strength varies, - * if the radio's automatic gain control (AGC) is working right. - * AGC value (see below) will provide the "interesting" info. */ - for (i = 0; i < 3; i++) - if (valid_antennae & (1 << i)) - max_rssi = max(ncphy->rssi_info[i << 1], max_rssi); - - IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", - ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4], - max_rssi, agc); - - /* dBm = max_rssi dB - agc dB - constant. - * Higher AGC (higher radio gain) means lower signal. 
*/ - return max_rssi - agc - IWLAGN_RSSI_OFFSET; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw; + u8 rate __maybe_unused = + iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " + "tsf:0x%.8x%.8x rate:%d\n", + le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); } - /* Set up 4965-specific Rx frame reply handlers */ static void iwl4965_rx_handler_setup(struct iwl_priv *priv) { /* Legacy Rx frames */ - priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx; + priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx; /* Tx response */ priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; -} - -static void iwl4965_setup_deferred_work(struct iwl_priv *priv) -{ - INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); -} - -static void iwl4965_cancel_deferred_work(struct iwl_priv *priv) -{ - cancel_work_sync(&priv->txpower_work); + priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; } static struct iwl_hcmd_ops iwl4965_hcmd = { .rxon_assoc = iwl4965_send_rxon_assoc, .commit_rxon = iwl4965_commit_rxon, - .set_rxon_chain = iwlagn_set_rxon_chain, - .send_bt_config = iwl_send_bt_config, + .set_rxon_chain = iwl4965_set_rxon_chain, }; static void iwl4965_post_scan(struct iwl_priv *priv) @@ -2344,7 +1919,7 @@ static void iwl4965_post_scan(struct iwl_priv *priv) * performing the scan, fire one off if needed */ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) - iwlcore_commit_rxon(priv, ctx); + iwl_legacy_commit_rxon(priv, ctx); } static void iwl4965_post_associate(struct iwl_priv *priv) @@ -2357,29 +1932,24 @@ static void iwl4965_post_associate(struct iwl_priv *priv) if (!vif || !priv->is_open) return; - if (vif->type == NL80211_IFTYPE_AP) { - IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); - return; - } - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; - iwl_scan_cancel_timeout(priv, 200); + iwl_legacy_scan_cancel_timeout(priv, 200); - conf = ieee80211_get_hw_conf(priv->hw); + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwlcore_commit_rxon(priv, ctx); + iwl_legacy_commit_rxon(priv, ctx); - ret = iwl_send_rxon_timing(priv, ctx); + ret = iwl_legacy_send_rxon_timing(priv, ctx); if (ret) IWL_WARN(priv, "RXON timing - " "Attempting to continue.\n"); ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - iwl_set_rxon_ht(priv, &priv->current_ht_config); + iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config); if (priv->cfg->ops->hcmd->set_rxon_chain) priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); @@ -2401,7 +1971,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv) ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; } - iwlcore_commit_rxon(priv, ctx); + iwl_legacy_commit_rxon(priv, ctx); IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", vif->bss_conf.aid, ctx->active.bssid_addr); @@ -2410,7 +1980,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv) case NL80211_IFTYPE_STATION: break; case NL80211_IFTYPE_ADHOC: - iwlagn_send_beacon_cmd(priv); + iwl4965_send_beacon_cmd(priv); break; default: IWL_ERR(priv, "%s Should not be called in %d mode\n", @@ -2422,10 +1992,10 @@ static void iwl4965_post_associate(struct iwl_priv *priv) * If 
chain noise has already been run, then we need to enable * power management here */ if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE) - iwl_power_update_mode(priv, false); + iwl_legacy_power_update_mode(priv, false); /* Enable Rx differential gain and sensitivity calibrations */ - iwl_chain_noise_reset(priv); + iwl4965_chain_noise_reset(priv); priv->start_calib = 1; } @@ -2441,14 +2011,14 @@ static void iwl4965_config_ap(struct iwl_priv *priv) return; /* The following should be done only at AP bring up */ - if (!iwl_is_associated_ctx(ctx)) { + if (!iwl_legacy_is_associated_ctx(ctx)) { /* RXON - unassoc (to set timing command) */ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwlcore_commit_rxon(priv, ctx); + iwl_legacy_commit_rxon(priv, ctx); /* RXON Timing */ - ret = iwl_send_rxon_timing(priv, ctx); + ret = iwl_legacy_send_rxon_timing(priv, ctx); if (ret) IWL_WARN(priv, "RXON timing failed - " "Attempting to continue.\n"); @@ -2456,7 +2026,7 @@ static void iwl4965_config_ap(struct iwl_priv *priv) /* AP has all antennas */ priv->chain_noise_data.active_chains = priv->hw_params.valid_rx_ant; - iwl_set_rxon_ht(priv, &priv->current_ht_config); + iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config); if (priv->cfg->ops->hcmd->set_rxon_chain) priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); @@ -2478,51 +2048,37 @@ static void iwl4965_config_ap(struct iwl_priv *priv) ~RXON_FLG_SHORT_SLOT_MSK; } /* need to send beacon cmd before committing assoc RXON! */ - iwlagn_send_beacon_cmd(priv); + iwl4965_send_beacon_cmd(priv); /* restore RXON assoc */ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; - iwlcore_commit_rxon(priv, ctx); + iwl_legacy_commit_rxon(priv, ctx); } - iwlagn_send_beacon_cmd(priv); - - /* FIXME - we need to add code here to detect a totally new - * configuration, reset the AP, unassoc, rxon timing, assoc, - * clear sta table, add BCAST sta... 
*/ + iwl4965_send_beacon_cmd(priv); } static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { .get_hcmd_size = iwl4965_get_hcmd_size, .build_addsta_hcmd = iwl4965_build_addsta_hcmd, - .chain_noise_reset = iwl4965_chain_noise_reset, - .gain_computation = iwl4965_gain_computation, - .tx_cmd_protection = iwl_legacy_tx_cmd_protection, - .calc_rssi = iwl4965_calc_rssi, - .request_scan = iwlagn_request_scan, + .request_scan = iwl4965_request_scan, .post_scan = iwl4965_post_scan, }; static struct iwl_lib_ops iwl4965_lib = { .set_hw_params = iwl4965_hw_set_hw_params, .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl, - .txq_set_sched = iwl4965_txq_set_sched, - .txq_agg_enable = iwl4965_txq_agg_enable, - .txq_agg_disable = iwl4965_txq_agg_disable, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, + .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl4965_hw_txq_free_tfd, + .txq_init = iwl4965_hw_tx_queue_init, .rx_handler_setup = iwl4965_rx_handler_setup, - .setup_deferred_work = iwl4965_setup_deferred_work, - .cancel_deferred_work = iwl4965_cancel_deferred_work, .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr, - .alive_notify = iwl4965_alive_notify, .init_alive_start = iwl4965_init_alive_start, .load_ucode = iwl4965_load_bsm, - .dump_nic_event_log = iwl_dump_nic_event_log, - .dump_nic_error_log = iwl_dump_nic_error_log, - .dump_fh = iwl_dump_fh, + .dump_nic_event_log = iwl4965_dump_nic_event_log, + .dump_nic_error_log = iwl4965_dump_nic_error_log, + .dump_fh = iwl4965_dump_fh, .set_channel_switch = iwl4965_hw_channel_switch, .apm_ops = { - .init = iwl_apm_init, + .init = iwl_legacy_apm_init, .config = iwl4965_nic_config, }, .eeprom_ops = { @@ -2535,64 +2091,56 @@ static struct iwl_lib_ops iwl4965_lib = { EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS, EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS }, - .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, - .release_semaphore = iwlcore_eeprom_release_semaphore, - .calib_version = iwl4965_eeprom_calib_version, - .query_addr = iwlcore_eeprom_query_addr, + .acquire_semaphore = iwl4965_eeprom_acquire_semaphore, + .release_semaphore = iwl4965_eeprom_release_semaphore, }, .send_tx_power = iwl4965_send_tx_power, - .update_chain_flags = iwl_update_chain_flags, - .isr_ops = { - .isr = iwl_isr_legacy, - }, + .update_chain_flags = iwl4965_update_chain_flags, .temp_ops = { .temperature = iwl4965_temperature_calib, }, .debugfs_ops = { - .rx_stats_read = iwl_ucode_rx_stats_read, - .tx_stats_read = iwl_ucode_tx_stats_read, - .general_stats_read = iwl_ucode_general_stats_read, - .bt_stats_read = iwl_ucode_bt_stats_read, - .reply_tx_error = iwl_reply_tx_error_read, + .rx_stats_read = iwl4965_ucode_rx_stats_read, + .tx_stats_read = iwl4965_ucode_tx_stats_read, + .general_stats_read = iwl4965_ucode_general_stats_read, }, - .check_plcp_health = iwl_good_plcp_health, + .check_plcp_health = iwl4965_good_plcp_health, }; static const struct iwl_legacy_ops iwl4965_legacy_ops = { .post_associate = iwl4965_post_associate, .config_ap = iwl4965_config_ap, - .manage_ibss_station = iwlagn_manage_ibss_station, - .update_bcast_stations = iwl_update_bcast_stations, + .manage_ibss_station = iwl4965_manage_ibss_station, + .update_bcast_stations = iwl4965_update_bcast_stations, }; struct ieee80211_ops iwl4965_hw_ops = { - .tx = iwlagn_mac_tx, - .start = iwlagn_mac_start, - .stop = iwlagn_mac_stop, - .add_interface = iwl_mac_add_interface, - .remove_interface = 
iwl_mac_remove_interface, - .change_interface = iwl_mac_change_interface, + .tx = iwl4965_mac_tx, + .start = iwl4965_mac_start, + .stop = iwl4965_mac_stop, + .add_interface = iwl_legacy_mac_add_interface, + .remove_interface = iwl_legacy_mac_remove_interface, + .change_interface = iwl_legacy_mac_change_interface, .config = iwl_legacy_mac_config, - .configure_filter = iwlagn_configure_filter, - .set_key = iwlagn_mac_set_key, - .update_tkip_key = iwlagn_mac_update_tkip_key, - .conf_tx = iwl_mac_conf_tx, + .configure_filter = iwl4965_configure_filter, + .set_key = iwl4965_mac_set_key, + .update_tkip_key = iwl4965_mac_update_tkip_key, + .conf_tx = iwl_legacy_mac_conf_tx, .reset_tsf = iwl_legacy_mac_reset_tsf, .bss_info_changed = iwl_legacy_mac_bss_info_changed, - .ampdu_action = iwlagn_mac_ampdu_action, - .hw_scan = iwl_mac_hw_scan, - .sta_add = iwlagn_mac_sta_add, - .sta_remove = iwl_mac_sta_remove, - .channel_switch = iwlagn_mac_channel_switch, - .flush = iwlagn_mac_flush, - .tx_last_beacon = iwl_mac_tx_last_beacon, + .ampdu_action = iwl4965_mac_ampdu_action, + .hw_scan = iwl_legacy_mac_hw_scan, + .sta_add = iwl4965_mac_sta_add, + .sta_remove = iwl_legacy_mac_sta_remove, + .channel_switch = iwl4965_mac_channel_switch, + .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, }; static const struct iwl_ops iwl4965_ops = { .lib = &iwl4965_lib, .hcmd = &iwl4965_hcmd, .utils = &iwl4965_hcmd_utils, - .led = &iwlagn_led_ops, + .led = &iwl4965_led_ops, .legacy = &iwl4965_legacy_ops, .ieee80211_ops = &iwl4965_hw_ops, }; @@ -2604,22 +2152,18 @@ static struct iwl_base_params iwl4965_base_params = { .pll_cfg_val = 0, .set_l0s = true, .use_bsm = true, - .use_isr_legacy = true, - .broken_powersave = true, .led_compensation = 61, .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, .wd_timeout = IWL_DEF_WD_TIMEOUT, .temperature_kelvin = true, .max_event_log_size = 512, - .tx_power_by_driver = true, .ucode_tracing = true, .sensitivity_calib_by_driver = true, .chain_noise_calib_by_driver = true, - .no_agg_framecnt_info = true, }; -struct iwl_cfg iwl4965_agn_cfg = { +struct iwl_cfg iwl4965_cfg = { .name = "Intel(R) Wireless WiFi Link 4965AGN", .fw_name_pre = IWL4965_FW_PRE, .ucode_api_max = IWL4965_UCODE_API_MAX, @@ -2630,7 +2174,7 @@ struct iwl_cfg iwl4965_agn_cfg = { .eeprom_ver = EEPROM_4965_EEPROM_VERSION, .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, .ops = &iwl4965_ops, - .mod_params = &iwlagn_mod_params, + .mod_params = &iwl4965_mod_params, .base_params = &iwl4965_base_params, .led_mode = IWL_LED_BLINK, /* @@ -2642,4 +2186,3 @@ struct iwl_cfg iwl4965_agn_cfg = { /* Module firmware */ MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); - diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h new file mode 100644 index 000000000000..01f8163daf16 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-4965.h @@ -0,0 +1,282 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_4965_h__ +#define __iwl_4965_h__ + +#include "iwl-dev.h" + +/* configuration for the _4965 devices */ +extern struct iwl_cfg iwl4965_cfg; + +extern struct iwl_mod_params iwl4965_mod_params; + +extern struct ieee80211_ops iwl4965_hw_ops; + +/* tx queue */ +void iwl4965_free_tfds_in_queue(struct iwl_priv *priv, + int sta_id, int tid, int freed); + +/* RXON */ +void iwl4965_set_rxon_chain(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); + +/* uCode */ +int iwl4965_verify_ucode(struct iwl_priv *priv); + +/* lib */ +void iwl4965_check_abort_status(struct iwl_priv *priv, + u8 frame_count, u32 status); + +void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_hw_nic_init(struct iwl_priv *priv); +int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display); + +/* rx */ +void iwl4965_rx_queue_restock(struct iwl_priv *priv); +void iwl4965_rx_replenish(struct iwl_priv *priv); +void iwl4965_rx_replenish_now(struct iwl_priv *priv); +void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); +int iwl4965_rxq_stop(struct iwl_priv *priv); +int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); +void iwl4965_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_rx_handle(struct iwl_priv *priv); + +/* tx */ +void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); +int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, u8 reset, u8 pad); +int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, + struct ieee80211_tx_info *info); +int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); +int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid, u16 *ssn); +int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 tid); +int iwl4965_txq_check_empty(struct iwl_priv *priv, + int sta_id, u8 tid, int txq_id); +void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index); +void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv); +int iwl4965_txq_ctx_alloc(struct iwl_priv *priv); +void iwl4965_txq_ctx_reset(struct iwl_priv *priv); +void iwl4965_txq_ctx_stop(struct iwl_priv *priv); +void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask); + +/* + * Acquire priv->lock before calling this function ! + */ +void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index); +/** + * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue + * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed + * @scd_retry: (1) Indicates queue will be used in aggregation mode + * + * NOTE: Acquire priv->lock before calling this function ! 
+ */ +void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry); + +static inline u32 iwl4965_tx_status_to_mac80211(u32 status) +{ + status &= TX_STATUS_MSK; + + switch (status) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + return IEEE80211_TX_STAT_ACK; + case TX_STATUS_FAIL_DEST_PS: + return IEEE80211_TX_STAT_TX_FILTERED; + default: + return 0; + } +} + +static inline bool iwl4965_is_tx_success(u32 status) +{ + status &= TX_STATUS_MSK; + return (status == TX_STATUS_SUCCESS) || + (status == TX_STATUS_DIRECT_DONE); +} + +u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); + +/* rx */ +void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +bool iwl4965_good_plcp_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); +void iwl4965_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl4965_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + +/* scan */ +int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); + +/* station mgmt */ +int iwl4965_manage_ibss_station(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add); + +/* hcmd */ +int iwl4965_send_beacon_cmd(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +const char *iwl4965_get_tx_fail_reason(u32 status); +#else +static inline const char * +iwl4965_get_tx_fail_reason(u32 status) { return ""; } +#endif + +/* station management */ +int iwl4965_alloc_bcast_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl4965_add_bssid_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, u8 *sta_id_r); +int iwl4965_remove_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key); +int iwl4965_set_default_wep_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key); +int iwl4965_restore_default_wep_keys(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl4965_set_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key, u8 sta_id); +int iwl4965_remove_dynamic_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *key, u8 sta_id); +void iwl4965_update_tkip_key(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); +int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, + int sta_id, int tid); +int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid, u16 ssn); +int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, + int tid); +void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, + int sta_id, int cnt); +int iwl4965_update_bcast_stations(struct iwl_priv *priv); + +/* rate */ +static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx) +{ + return BIT(ant_idx) << RATE_MCS_ANT_POS; +} + +static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags) +{ + return le32_to_cpu(rate_n_flags) & 0xFF; +} + +static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags) +{ + return cpu_to_le32(flags|(u32)rate); +} + +/* eeprom */ +void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); +int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv); +void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv); +int iwl4965_eeprom_check_version(struct iwl_priv *priv); 
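A minimal sketch of how the inline rate helpers declared above (iwl4965_ant_idx_to_flags(), iwl4965_hw_set_rate_n_flags(), iwl4965_hw_get_rate()) fit together. It is not part of the patch: the 0x0D PLCP code (6 Mbps legacy OFDM, per the rate_n_flags table documented in iwl-commands.h), antenna index 0, and the function name are assumed example values only.

/* Hypothetical sketch: compose a rate_n_flags word for 6 Mbps legacy OFDM
 * on antenna A, then read the PLCP rate back with the helpers above. */
static __le32 iwl4965_example_rate_n_flags(void)
{
	u32 ant_flags = iwl4965_ant_idx_to_flags(0);	/* == RATE_MCS_ANT_A_MSK */
	__le32 rnf = iwl4965_hw_set_rate_n_flags(0x0D, ant_flags);

	WARN_ON(iwl4965_hw_get_rate(rnf) != 0x0D);	/* rate travels in bits 7:0 */
	return rnf;
}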
+ +/* mac80211 handlers (for 4965) */ +void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +int iwl4965_mac_start(struct ieee80211_hw *hw); +void iwl4965_mac_stop(struct ieee80211_hw *hw); +void iwl4965_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast); +int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key); +int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); +int iwl4965_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch); + +#endif /* __iwl_4965_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/iwl-commands.h new file mode 100644 index 000000000000..17a1d504348e --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-commands.h @@ -0,0 +1,3405 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +/* + * Please use this file (iwl-commands.h) only for uCode API definitions. + * Please use iwl-xxxx-hw.h for hardware-related definitions. + * Please use iwl-dev.h for driver implementation definitions. + */ + +#ifndef __iwl_legacy_commands_h__ +#define __iwl_legacy_commands_h__ + +struct iwl_priv; + +/* uCode version contains 4 values: Major/Minor/API/Serial */ +#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24) +#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16) +#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8) +#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF) + + +/* Tx rates */ +#define IWL_CCK_RATES 4 +#define IWL_OFDM_RATES 8 +#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES) + +enum { + REPLY_ALIVE = 0x1, + REPLY_ERROR = 0x2, + + /* RXON and QOS commands */ + REPLY_RXON = 0x10, + REPLY_RXON_ASSOC = 0x11, + REPLY_QOS_PARAM = 0x13, + REPLY_RXON_TIMING = 0x14, + + /* Multi-Station support */ + REPLY_ADD_STA = 0x18, + REPLY_REMOVE_STA = 0x19, + + /* Security */ + REPLY_WEPKEY = 0x20, + + /* RX, TX, LEDs */ + REPLY_3945_RX = 0x1b, /* 3945 only */ + REPLY_TX = 0x1c, + REPLY_RATE_SCALE = 0x47, /* 3945 only */ + REPLY_LEDS_CMD = 0x48, + REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */ + + /* 802.11h related */ + REPLY_CHANNEL_SWITCH = 0x72, + CHANNEL_SWITCH_NOTIFICATION = 0x73, + REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74, + SPECTRUM_MEASURE_NOTIFICATION = 0x75, + + /* Power Management */ + POWER_TABLE_CMD = 0x77, + PM_SLEEP_NOTIFICATION = 0x7A, + PM_DEBUG_STATISTIC_NOTIFIC = 0x7B, + + /* Scan commands and notifications */ + REPLY_SCAN_CMD = 0x80, + REPLY_SCAN_ABORT_CMD = 0x81, + SCAN_START_NOTIFICATION = 0x82, + SCAN_RESULTS_NOTIFICATION = 0x83, + SCAN_COMPLETE_NOTIFICATION = 0x84, + + /* IBSS/AP commands */ + BEACON_NOTIFICATION = 0x90, + REPLY_TX_BEACON = 0x91, + + /* Miscellaneous commands */ + REPLY_TX_PWR_TABLE_CMD = 0x97, + + /* Bluetooth device coexistence config command */ + REPLY_BT_CONFIG = 0x9b, + + /* Statistics */ + REPLY_STATISTICS_CMD = 0x9c, + STATISTICS_NOTIFICATION = 0x9d, + + /* RF-KILL commands and notifications */ + CARD_STATE_NOTIFICATION = 0xa1, + + /* Missed beacons notification */ + MISSED_BEACONS_NOTIFICATION = 0xa2, + + REPLY_CT_KILL_CONFIG_CMD = 0xa4, + SENSITIVITY_CMD = 0xa8, + REPLY_PHY_CALIBRATION_CMD = 0xb0, + REPLY_RX_PHY_CMD = 0xc0, + REPLY_RX_MPDU_CMD = 0xc1, + REPLY_RX = 0xc3, + REPLY_COMPRESSED_BA = 0xc5, + + REPLY_MAX = 0xff +}; + +/****************************************************************************** + * (0) + * Commonly used structures and definitions: + * Command header, rate_n_flags, txpower + * + 
*****************************************************************************/ + +/* iwl_cmd_header flags value */ +#define IWL_CMD_FAILED_MSK 0x40 + +#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) +#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) +#define SEQ_TO_INDEX(s) ((s) & 0xff) +#define INDEX_TO_SEQ(i) ((i) & 0xff) +#define SEQ_HUGE_FRAME cpu_to_le16(0x4000) +#define SEQ_RX_FRAME cpu_to_le16(0x8000) + +/** + * struct iwl_cmd_header + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. + */ +struct iwl_cmd_header { + u8 cmd; /* Command ID: REPLY_RXON, etc. */ + u8 flags; /* 0:5 reserved, 6 abort, 7 internal */ + /* + * The driver sets up the sequence number to values of its choosing. + * uCode does not use this value, but passes it back to the driver + * when sending the response to each driver-originated command, so + * the driver can match the response to the command. Since the values + * don't get used by uCode, the driver may set up an arbitrary format. + * + * There is one exception: uCode sets bit 15 when it originates + * the response/notification, i.e. when the response/notification + * is not a direct response to a command sent by the driver. For + * example, uCode issues REPLY_3945_RX when it sends a received frame + * to the driver; it is not a direct response to any driver command. + * + * The Linux driver uses the following format: + * + * 0:7 tfd index - position within TX queue + * 8:12 TX queue id + * 13 reserved + * 14 huge - driver sets this to indicate command is in the + * 'huge' storage at the end of the command buffers + * 15 unsolicited RX or uCode-originated notification + */ + __le16 sequence; + + /* command or response/notification data follows immediately */ + u8 data[0]; +} __packed; + + +/** + * struct iwl3945_tx_power + * + * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH + * + * Each entry contains two values: + * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained + * linear value that multiplies the output of the digital signal processor, + * before being sent to the analog radio. + * 2) Radio gain. This sets the analog gain of the radio Tx path. + * It is a coarser setting, and behaves in a logarithmic (dB) fashion. + * + * Driver obtains values from struct iwl3945_tx_power power_gain_table[][]. 
+ */ +struct iwl3945_tx_power { + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ +} __packed; + +/** + * struct iwl3945_power_per_rate + * + * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + */ +struct iwl3945_power_per_rate { + u8 rate; /* plcp */ + struct iwl3945_tx_power tpc; + u8 reserved; +} __packed; + +/** + * iwl4965 rate_n_flags bit fields + * + * rate_n_flags format is used in following iwl4965 commands: + * REPLY_RX (response only) + * REPLY_RX_MPDU (response only) + * REPLY_TX (both command and response) + * REPLY_TX_LINK_QUALITY_CMD + * + * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"): + * 2-0: 0) 6 Mbps + * 1) 12 Mbps + * 2) 18 Mbps + * 3) 24 Mbps + * 4) 36 Mbps + * 5) 48 Mbps + * 6) 54 Mbps + * 7) 60 Mbps + * + * 4-3: 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + * + * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data + * + * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"): + * 3-0: 0xD) 6 Mbps + * 0xF) 9 Mbps + * 0x5) 12 Mbps + * 0x7) 18 Mbps + * 0x9) 24 Mbps + * 0xB) 36 Mbps + * 0x1) 48 Mbps + * 0x3) 54 Mbps + * + * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"): + * 6-0: 10) 1 Mbps + * 20) 2 Mbps + * 55) 5.5 Mbps + * 110) 11 Mbps + */ +#define RATE_MCS_CODE_MSK 0x7 +#define RATE_MCS_SPATIAL_POS 3 +#define RATE_MCS_SPATIAL_MSK 0x18 +#define RATE_MCS_HT_DUP_POS 5 +#define RATE_MCS_HT_DUP_MSK 0x20 + +/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */ +#define RATE_MCS_FLAGS_POS 8 +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK 0x100 + +/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK 0x200 + +/* Bit 10: (1) Use Green Field preamble */ +#define RATE_MCS_GF_POS 10 +#define RATE_MCS_GF_MSK 0x400 + +/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */ +#define RATE_MCS_HT40_POS 11 +#define RATE_MCS_HT40_MSK 0x800 + +/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */ +#define RATE_MCS_DUP_POS 12 +#define RATE_MCS_DUP_MSK 0x1000 + +/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK 0x2000 + +/** + * rate_n_flags Tx antenna masks + * 4965 has 2 transmitters + * bit14:16 + */ +#define RATE_MCS_ANT_POS 14 +#define RATE_MCS_ANT_A_MSK 0x04000 +#define RATE_MCS_ANT_B_MSK 0x08000 +#define RATE_MCS_ANT_C_MSK 0x10000 +#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK) +#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK) +#define RATE_ANT_NUM 3 + +#define POWER_TABLE_NUM_ENTRIES 33 +#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 +#define POWER_TABLE_CCK_ENTRY 32 + +#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24 +#define IWL_PWR_CCK_ENTRIES 2 + +/** + * union iwl4965_tx_power_dual_stream + * + * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * Use __le32 version (struct tx_power_dual_stream) when building command. + * + * Driver provides radio gain and DSP attenuation settings to device in pairs, + * one value for each transmitter chain. The first value is for transmitter A, + * second for transmitter B. + * + * For SISO bit rates, both values in a pair should be identical. + * For MIMO rates, one value may be different from the other, + * in order to balance the Tx output between the two transmitters. + * + * See more details in doc for TXPOWER in iwl-4965-hw.h. 
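Before the 4965 txpower unions, a concrete example of the rate_n_flags bit layout defined just above. This is a sketch only: plain host-order integers stand in for the __le32 carried in commands, the masks are copied from the definitions above, and the two sample rates (HT rate code 5 with HT40/SGI on antenna A, and legacy OFDM 24 Mbps, PLCP 0x9) are arbitrary.

#include <stdio.h>

/* Masks copied from the definitions above */
#define RATE_MCS_CODE_MSK       0x7
#define RATE_MCS_HT_MSK         0x100
#define RATE_MCS_CCK_MSK        0x200
#define RATE_MCS_HT40_MSK       0x800
#define RATE_MCS_SGI_MSK        0x2000
#define RATE_MCS_ANT_A_MSK      0x04000

static void show(unsigned r)
{
    if (r & RATE_MCS_HT_MSK)
        printf("HT rate code %u%s%s", r & RATE_MCS_CODE_MSK,
               (r & RATE_MCS_HT40_MSK) ? ", HT40" : "",
               (r & RATE_MCS_SGI_MSK) ? ", SGI" : "");
    else
        printf("%s PLCP 0x%x",
               (r & RATE_MCS_CCK_MSK) ? "CCK" : "legacy OFDM",
               r & 0xff);
    printf("%s\n", (r & RATE_MCS_ANT_A_MSK) ? ", antenna A" : "");
}

int main(void)
{
    /* HT rate code 5 (48 Mbps, single stream), 40 MHz, short GI, antenna A */
    unsigned ht = 5 | RATE_MCS_HT_MSK | RATE_MCS_HT40_MSK |
                  RATE_MCS_SGI_MSK | RATE_MCS_ANT_A_MSK;
    /* Legacy OFDM 24 Mbps: PLCP 0x9 in bits 3:0, HT and CCK bits clear */
    unsigned ofdm = 0x9 | RATE_MCS_ANT_A_MSK;

    show(ht);
    show(ofdm);
    return 0;
}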
+ */ +union iwl4965_tx_power_dual_stream { + struct { + u8 radio_tx_gain[2]; + u8 dsp_predis_atten[2]; + } s; + u32 dw; +}; + +/** + * struct tx_power_dual_stream + * + * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + * + * Same format as iwl_tx_power_dual_stream, but __le32 + */ +struct tx_power_dual_stream { + __le32 dw; +} __packed; + +/** + * struct iwl4965_tx_power_db + * + * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH + */ +struct iwl4965_tx_power_db { + struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES]; +} __packed; + +/****************************************************************************** + * (0a) + * Alive and Error Commands & Responses: + * + *****************************************************************************/ + +#define UCODE_VALID_OK cpu_to_le32(0x1) +#define INITIALIZE_SUBTYPE (9) + +/* + * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command) + * + * uCode issues this "initialize alive" notification once the initialization + * uCode image has completed its work, and is ready to load the runtime image. + * This is the *first* "alive" notification that the driver will receive after + * rebooting uCode; the "initialize" alive is indicated by subtype field == 9. + * + * See comments documenting "BSM" (bootstrap state machine). + * + * For 4965, this notification contains important calibration data for + * calculating txpower settings: + * + * 1) Power supply voltage indication. The voltage sensor outputs higher + * values for lower voltage, and vice verse. + * + * 2) Temperature measurement parameters, for each of two channel widths + * (20 MHz and 40 MHz) supported by the radios. Temperature sensing + * is done via one of the receiver chains, and channel width influences + * the results. + * + * 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation, + * for each of 5 frequency ranges. + */ +struct iwl_init_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; /* "9" for initialize alive */ + __le16 reserved2; + __le32 log_event_table_ptr; + __le32 error_event_table_ptr; + __le32 timestamp; + __le32 is_valid; + + /* calibration values from "initialize" uCode */ + __le32 voltage; /* signed, higher value is lower voltage */ + __le32 therm_r1[2]; /* signed, 1st for normal, 2nd for HT40 */ + __le32 therm_r2[2]; /* signed */ + __le32 therm_r3[2]; /* signed */ + __le32 therm_r4[2]; /* signed */ + __le32 tx_atten[5][2]; /* signed MIMO gain comp, 5 freq groups, + * 2 Tx chains */ +} __packed; + + +/** + * REPLY_ALIVE = 0x1 (response only, not a command) + * + * uCode issues this "alive" notification once the runtime image is ready + * to receive commands from the driver. This is the *second* "alive" + * notification that the driver will receive after rebooting uCode; + * this "alive" is indicated by subtype field != 9. + * + * See comments documenting "BSM" (bootstrap state machine). + * + * This response includes two pointers to structures within the device's + * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging: + * + * 1) log_event_table_ptr indicates base of the event log. This traces + * a 256-entry history of uCode execution within a circular buffer. 
+ * Its header format is: + * + * __le32 log_size; log capacity (in number of entries) + * __le32 type; (1) timestamp with each entry, (0) no timestamp + * __le32 wraps; # times uCode has wrapped to top of circular buffer + * __le32 write_index; next circular buffer entry that uCode would fill + * + * The header is followed by the circular buffer of log entries. Entries + * with timestamps have the following format: + * + * __le32 event_id; range 0 - 1500 + * __le32 timestamp; low 32 bits of TSF (of network, if associated) + * __le32 data; event_id-specific data value + * + * Entries without timestamps contain only event_id and data. + * + * + * 2) error_event_table_ptr indicates base of the error log. This contains + * information about any uCode error that occurs. For 4965, the format + * of the error log is: + * + * __le32 valid; (nonzero) valid, (0) log is empty + * __le32 error_id; type of error + * __le32 pc; program counter + * __le32 blink1; branch link + * __le32 blink2; branch link + * __le32 ilink1; interrupt link + * __le32 ilink2; interrupt link + * __le32 data1; error-specific data + * __le32 data2; error-specific data + * __le32 line; source code line of error + * __le32 bcon_time; beacon timer + * __le32 tsf_low; network timestamp function timer + * __le32 tsf_hi; network timestamp function timer + * __le32 gp1; GP1 timer register + * __le32 gp2; GP2 timer register + * __le32 gp3; GP3 timer register + * __le32 ucode_ver; uCode version + * __le32 hw_ver; HW Silicon version + * __le32 brd_ver; HW board version + * __le32 log_pc; log program counter + * __le32 frame_ptr; frame pointer + * __le32 stack_ptr; stack pointer + * __le32 hcmd; last host command + * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag + * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag + * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag + * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag + * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt + * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT + * __le32 wait_event; wait event() caller address + * __le32 l2p_control; L2pControlField + * __le32 l2p_duration; L2pDurationField + * __le32 l2p_mhvalid; L2pMhValidBits + * __le32 l2p_addr_match; L2pAddrMatchStat + * __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL) + * __le32 u_timestamp; indicate when the date and time of the compilation + * __le32 reserved; + * + * The Linux driver can print both logs to the system log when a uCode error + * occurs. 
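To make the event-log layout just described concrete, here is a sketch of how a host-side reader could walk it. The read_word() helper is a hypothetical stand-in for the SRAM accesses the driver actually performs through the HBUS_TARG_MEM_* registers (it returns 0 here, so the sketch only demonstrates the offsets); the four-word header and the 2- or 3-word entries follow the format documented above.

#include <stdio.h>

/* Hypothetical SRAM accessor; the real driver reads device memory via
 * HBUS_TARG_MEM_* registers and converts from little endian. */
static unsigned read_word(unsigned sram_addr)
{
    (void)sram_addr;
    return 0;
}

static void dump_event_log(unsigned log_base)
{
    unsigned size  = read_word(log_base);       /* capacity, in entries  */
    unsigned type  = read_word(log_base + 4);   /* 1 = entries carry TSF */
    unsigned wraps = read_word(log_base + 8);   /* times buffer wrapped  */
    unsigned next  = read_word(log_base + 12);  /* next entry to fill    */
    unsigned entry_words = type ? 3 : 2;        /* id[, timestamp], data */
    unsigned first = log_base + 16;             /* entries follow header */
    unsigned i;

    printf("event log: %u entries, %u wraps, write index %u\n",
           size, wraps, next);
    for (i = 0; i < size; i++) {
        unsigned addr = first + i * entry_words * 4;
        printf("  event %u data 0x%08x\n", read_word(addr),
               read_word(addr + (entry_words - 1) * 4));
    }
}

int main(void)
{
    /* log_event_table_ptr from the alive response would go here */
    dump_event_log(0);
    return 0;
}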
+ */ +struct iwl_alive_resp { + u8 ucode_minor; + u8 ucode_major; + __le16 reserved1; + u8 sw_rev[8]; + u8 ver_type; + u8 ver_subtype; /* not "9" for runtime alive */ + __le16 reserved2; + __le32 log_event_table_ptr; /* SRAM address for event log */ + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 timestamp; + __le32 is_valid; +} __packed; + +/* + * REPLY_ERROR = 0x2 (response only, not a command) + */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_info; + __le64 timestamp; +} __packed; + +/****************************************************************************** + * (1) + * RXON Commands & Responses: + * + *****************************************************************************/ + +/* + * Rx config defines & structure + */ +/* rx_config device types */ +enum { + RXON_DEV_TYPE_AP = 1, + RXON_DEV_TYPE_ESS = 3, + RXON_DEV_TYPE_IBSS = 4, + RXON_DEV_TYPE_SNIFFER = 6, +}; + + +#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0) +#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0) +#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1) +#define RXON_RX_CHAIN_VALID_POS (1) +#define RXON_RX_CHAIN_FORCE_SEL_MSK cpu_to_le16(0x7 << 4) +#define RXON_RX_CHAIN_FORCE_SEL_POS (4) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK cpu_to_le16(0x7 << 7) +#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define RXON_RX_CHAIN_CNT_MSK cpu_to_le16(0x3 << 10) +#define RXON_RX_CHAIN_CNT_POS (10) +#define RXON_RX_CHAIN_MIMO_CNT_MSK cpu_to_le16(0x3 << 12) +#define RXON_RX_CHAIN_MIMO_CNT_POS (12) +#define RXON_RX_CHAIN_MIMO_FORCE_MSK cpu_to_le16(0x1 << 14) +#define RXON_RX_CHAIN_MIMO_FORCE_POS (14) + +/* rx_config flags */ +/* band & modulation selection */ +#define RXON_FLG_BAND_24G_MSK cpu_to_le32(1 << 0) +#define RXON_FLG_CCK_MSK cpu_to_le32(1 << 1) +/* auto detection enable */ +#define RXON_FLG_AUTO_DETECT_MSK cpu_to_le32(1 << 2) +/* TGg protection when tx */ +#define RXON_FLG_TGG_PROTECT_MSK cpu_to_le32(1 << 3) +/* cck short slot & preamble */ +#define RXON_FLG_SHORT_SLOT_MSK cpu_to_le32(1 << 4) +#define RXON_FLG_SHORT_PREAMBLE_MSK cpu_to_le32(1 << 5) +/* antenna selection */ +#define RXON_FLG_DIS_DIV_MSK cpu_to_le32(1 << 7) +#define RXON_FLG_ANT_SEL_MSK cpu_to_le32(0x0f00) +#define RXON_FLG_ANT_A_MSK cpu_to_le32(1 << 8) +#define RXON_FLG_ANT_B_MSK cpu_to_le32(1 << 9) +/* radar detection enable */ +#define RXON_FLG_RADAR_DETECT_MSK cpu_to_le32(1 << 12) +#define RXON_FLG_TGJ_NARROW_BAND_MSK cpu_to_le32(1 << 13) +/* rx response to host with 8-byte TSF +* (according to ON_AIR deassertion) */ +#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15) + + +/* HT flags */ +#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22) +#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22) + +#define RXON_FLG_HT_OPERATING_MODE_POS (23) + +#define RXON_FLG_HT_PROT_MSK cpu_to_le32(0x1 << 23) +#define RXON_FLG_HT40_PROT_MSK cpu_to_le32(0x2 << 23) + +#define RXON_FLG_CHANNEL_MODE_POS (25) +#define RXON_FLG_CHANNEL_MODE_MSK cpu_to_le32(0x3 << 25) + +/* channel mode */ +enum { + CHANNEL_MODE_LEGACY = 0, + CHANNEL_MODE_PURE_40 = 1, + CHANNEL_MODE_MIXED = 2, + CHANNEL_MODE_RESERVED = 3, +}; +#define RXON_FLG_CHANNEL_MODE_LEGACY \ + cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS) +#define RXON_FLG_CHANNEL_MODE_PURE_40 \ + cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS) +#define RXON_FLG_CHANNEL_MODE_MIXED \ + cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS) + +/* CTS to self (if spec allows) flag */ +#define 
RXON_FLG_SELF_CTS_EN cpu_to_le32(0x1<<30) + +/* rx_config filter flags */ +/* accept all data frames */ +#define RXON_FILTER_PROMISC_MSK cpu_to_le32(1 << 0) +/* pass control & management to host */ +#define RXON_FILTER_CTL2HOST_MSK cpu_to_le32(1 << 1) +/* accept multi-cast */ +#define RXON_FILTER_ACCEPT_GRP_MSK cpu_to_le32(1 << 2) +/* don't decrypt uni-cast frames */ +#define RXON_FILTER_DIS_DECRYPT_MSK cpu_to_le32(1 << 3) +/* don't decrypt multi-cast frames */ +#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4) +/* STA is associated */ +#define RXON_FILTER_ASSOC_MSK cpu_to_le32(1 << 5) +/* transfer to host non bssid beacons in associated state */ +#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6) + +/** + * REPLY_RXON = 0x10 (command, has simple generic response) + * + * RXON tunes the radio tuner to a service channel, and sets up a number + * of parameters that are used primarily for Rx, but also for Tx operations. + * + * NOTE: When tuning to a new channel, driver must set the + * RXON_FILTER_ASSOC_MSK to 0. This will clear station-dependent + * info within the device, including the station tables, tx retry + * rate tables, and txpower tables. Driver must build a new station + * table and txpower table before transmitting anything on the RXON + * channel. + * + * NOTE: All RXONs wipe clean the internal txpower table. Driver must + * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10), + * regardless of whether RXON_FILTER_ASSOC_MSK is set. + */ + +struct iwl3945_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 reserved4; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + __le16 reserved5; +} __packed; + +struct iwl4965_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 rx_chain; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; +} __packed; + +/* Create a common rxon cmd which will be typecast into the 3945 or 4965 + * specific rxon cmd, depending on where it is called from. 
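The channel-change rules in the REPLY_RXON notes above boil down to a fixed ordering, sketched below. Everything here is a stand-in: the struct is trimmed to the two fields the sketch touches, and send_cmd()/rebuild_station_table()/send_tx_power_table() are hypothetical placeholders for the driver's real command path, which works on the __le16/__le32 rxon structures defined in this header.

#include <stdio.h>

#define REPLY_RXON              0x10
#define RXON_FILTER_ASSOC_MSK   (1 << 5)

/* Trimmed stand-in for the rxon command; only the fields this sketch uses */
struct rxon_sketch {
    unsigned short channel;
    unsigned filter_flags;
};

/* Hypothetical stand-ins for the driver's real command path */
static int send_cmd(unsigned id, void *data)
{
    (void)data;
    printf("send command 0x%02x\n", id);
    return 0;
}
static int rebuild_station_table(void)
{
    printf("re-add stations (broadcast entry first)\n");
    return 0;
}
static int send_tx_power_table(void)
{
    printf("send REPLY_TX_PWR_TABLE_CMD\n");
    return 0;
}

static int tune_to_channel(struct rxon_sketch *rxon, unsigned short channel)
{
    /* 1) Tune with the "associated" filter bit cleared; this wipes the
     *    device's station and txpower tables. */
    rxon->channel = channel;
    rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
    if (send_cmd(REPLY_RXON, rxon))
        return -1;

    /* 2) Rebuild the station table before transmitting anything. */
    if (rebuild_station_table())
        return -1;

    /* 3) Every RXON wipes the internal txpower table, so resend it. */
    return send_tx_power_table();
}

int main(void)
{
    struct rxon_sketch rxon = { .channel = 6,
                                .filter_flags = RXON_FILTER_ASSOC_MSK };
    return tune_to_channel(&rxon, 11);
}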
+ */ +struct iwl_legacy_rxon_cmd { + u8 node_addr[6]; + __le16 reserved1; + u8 bssid_addr[6]; + __le16 reserved2; + u8 wlap_bssid_addr[6]; + __le16 reserved3; + u8 dev_type; + u8 air_propagation; + __le16 rx_chain; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 assoc_id; + __le32 flags; + __le32 filter_flags; + __le16 channel; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + u8 reserved4; + u8 reserved5; +} __packed; + + +/* + * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response) + */ +struct iwl3945_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + __le16 reserved; +} __packed; + +struct iwl4965_rxon_assoc_cmd { + __le32 flags; + __le32 filter_flags; + u8 ofdm_basic_rates; + u8 cck_basic_rates; + u8 ofdm_ht_single_stream_basic_rates; + u8 ofdm_ht_dual_stream_basic_rates; + __le16 rx_chain_select_flags; + __le16 reserved; +} __packed; + +#define IWL_CONN_MAX_LISTEN_INTERVAL 10 +#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ +#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */ + +/* + * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) + */ +struct iwl_rxon_time_cmd { + __le64 timestamp; + __le16 beacon_interval; + __le16 atim_window; + __le32 beacon_init_val; + __le16 listen_interval; + u8 dtim_period; + u8 delta_cp_bss_tbtts; +} __packed; + +/* + * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response) + */ +struct iwl3945_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + struct iwl3945_power_per_rate power[IWL_MAX_RATES]; +} __packed; + +struct iwl4965_channel_switch_cmd { + u8 band; + u8 expect_beacon; + __le16 channel; + __le32 rxon_flags; + __le32 rxon_filter_flags; + __le32 switch_time; + struct iwl4965_tx_power_db tx_power; +} __packed; + +/* + * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) + */ +struct iwl_csa_notification { + __le16 band; + __le16 channel; + __le32 status; /* 0 - OK, 1 - fail */ +} __packed; + +/****************************************************************************** + * (2) + * Quality-of-Service (QOS) Commands & Responses: + * + *****************************************************************************/ + +/** + * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM + * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd + * + * @cw_min: Contention window, start value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x0f. + * @cw_max: Contention window, max value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x3f. + * @aifsn: Number of slots in Arbitration Interframe Space (before + * performing random backoff timing prior to Tx). Device default 1. + * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. + * + * Device will automatically increase contention window by (2*CW) + 1 for each + * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW + * value, to cap the CW value. 
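A small numeric illustration of the EDCA timing parameters just described: the documented device defaults (cw_min 0x0f, cw_max 0x3f, aifsn 1, txop 0) and the contention-window growth rule, where each retry computes (2*CW)+1 and then ANDs the result with cw_max to cap it. In the real command the same four values would be written, per access category, into the AC_NUM entries of iwl_qosparam_cmd defined just below; here plain host-order variables stand in for the __le16 fields.

#include <stdio.h>

int main(void)
{
    /* Documented device defaults for one access category */
    unsigned cw_min = 0x0f;      /* power of 2, minus 1 */
    unsigned cw_max = 0x3f;      /* also used as a mask to cap CW growth */
    unsigned aifsn  = 1;
    unsigned txop   = 0;         /* usec; 0 = no TXOP limit */
    unsigned cw, retry;

    printf("defaults: cw %u..%u, aifsn %u, txop %u\n",
           cw_min, cw_max, aifsn, txop);

    /* CW growth on successive retries: (2*CW) + 1, ANDed with cw_max */
    cw = cw_min;
    for (retry = 1; retry <= 4; retry++) {
        cw = ((2 * cw) + 1) & cw_max;
        printf("retry %u: CW = 0x%02x\n", retry, cw);
    }
    return 0;
}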
+ */ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 reserved1; + __le16 edca_txop; +} __packed; + +/* QoS flags defines */ +#define QOS_PARAM_FLG_UPDATE_EDCA_MSK cpu_to_le32(0x01) +#define QOS_PARAM_FLG_TGN_MSK cpu_to_le32(0x02) +#define QOS_PARAM_FLG_TXOP_TYPE_MSK cpu_to_le32(0x10) + +/* Number of Access Categories (AC) (EDCA), queues 0..3 */ +#define AC_NUM 4 + +/* + * REPLY_QOS_PARAM = 0x13 (command, has simple generic response) + * + * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs + * 0: Background, 1: Best Effort, 2: Video, 3: Voice. + */ +struct iwl_qosparam_cmd { + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM]; +} __packed; + +/****************************************************************************** + * (3) + * Add/Modify Stations Commands & Responses: + * + *****************************************************************************/ +/* + * Multi station support + */ + +/* Special, dedicated locations within device's station table */ +#define IWL_AP_ID 0 +#define IWL_STA_ID 2 +#define IWL3945_BROADCAST_ID 24 +#define IWL3945_STATION_COUNT 25 +#define IWL4965_BROADCAST_ID 31 +#define IWL4965_STATION_COUNT 32 + +#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/ +#define IWL_INVALID_STATION 255 + +#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) +#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) +#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17) +#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18) +#define STA_FLG_MAX_AGG_SIZE_POS (19) +#define STA_FLG_MAX_AGG_SIZE_MSK cpu_to_le32(3 << 19) +#define STA_FLG_HT40_EN_MSK cpu_to_le32(1 << 21) +#define STA_FLG_MIMO_DIS_MSK cpu_to_le32(1 << 22) +#define STA_FLG_AGG_MPDU_DENSITY_POS (23) +#define STA_FLG_AGG_MPDU_DENSITY_MSK cpu_to_le32(7 << 23) + +/* Use in mode field. 1: modify existing entry, 0: add new station entry */ +#define STA_CONTROL_MODIFY_MSK 0x01 + +/* key flags __le16*/ +#define STA_KEY_FLG_ENCRYPT_MSK cpu_to_le16(0x0007) +#define STA_KEY_FLG_NO_ENC cpu_to_le16(0x0000) +#define STA_KEY_FLG_WEP cpu_to_le16(0x0001) +#define STA_KEY_FLG_CCMP cpu_to_le16(0x0002) +#define STA_KEY_FLG_TKIP cpu_to_le16(0x0003) + +#define STA_KEY_FLG_KEYID_POS 8 +#define STA_KEY_FLG_INVALID cpu_to_le16(0x0800) +/* wep key is either from global key (0) or from station info array (1) */ +#define STA_KEY_FLG_MAP_KEY_MSK cpu_to_le16(0x0008) + +/* wep key in STA: 5-bytes (0) or 13-bytes (1) */ +#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000) +#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000) +#define STA_KEY_MAX_NUM 8 + +/* Flags indicate whether to modify vs. 
don't change various station params */ +#define STA_MODIFY_KEY_MASK 0x01 +#define STA_MODIFY_TID_DISABLE_TX 0x02 +#define STA_MODIFY_TX_RATE_MSK 0x04 +#define STA_MODIFY_ADDBA_TID_MSK 0x08 +#define STA_MODIFY_DELBA_TID_MSK 0x10 +#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20 + +/* Receiver address (actually, Rx station's index into station table), + * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ +#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid)) + +struct iwl4965_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ + u8 reserved1; + __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ + u8 key_offset; + u8 reserved2; + u8 key[16]; /* 16-byte unicast decryption key */ +} __packed; + +/** + * struct sta_id_modify + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change + * + * Driver selects unused table index when adding new station, + * or the index to a pre-existing station entry when modifying that station. + * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP). + * + * modify_mask flags select which parameters to modify vs. leave alone. + */ +struct sta_id_modify { + u8 addr[ETH_ALEN]; + __le16 reserved1; + u8 sta_id; + u8 modify_mask; + __le16 reserved2; +} __packed; + +/* + * REPLY_ADD_STA = 0x18 (command) + * + * The device contains an internal table of per-station information, + * with info on security keys, aggregation parameters, and Tx rates for + * initial Tx attempt and any retries (4965 devices uses + * REPLY_TX_LINK_QUALITY_CMD, + * 3945 uses REPLY_RATE_SCALE to set up rate tables). + * + * REPLY_ADD_STA sets up the table entry for one station, either creating + * a new entry, or modifying a pre-existing one. + * + * NOTE: RXON command (without "associated" bit set) wipes the station table + * clean. Moving into RF_KILL state does this also. Driver must set up + * new station table before transmitting anything on the RXON channel + * (except active scans or active measurements; those commands carry + * their own txpower/rate setup data). + * + * When getting started on a new channel, driver must set up the + * IWL_BROADCAST_ID entry (last entry in the table). For a client + * station in a BSS, once an AP is selected, driver sets up the AP STA + * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP + * are all that are needed for a BSS client station. If the device is + * used as AP, or in an IBSS network, driver must set up station table + * entries for all STAs in network, starting with index IWL_STA_ID. + */ + +struct iwl3945_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + + __le16 rate_n_flags; + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. 
+ * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; +} __packed; + +struct iwl4965_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + + __le16 reserved1; + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; + + /* + * Number of packets OK to transmit to station even though + * it is asleep -- used to synchronise PS-poll and u-APSD + * responses while ucode keeps track of STA sleep state. + */ + __le16 sleep_tx_count; + + __le16 reserved2; +} __packed; + +/* Wrapper struct for 3945 and 4965 addsta_cmd structures */ +struct iwl_legacy_addsta_cmd { + u8 mode; /* 1: modify existing, 0: add new station */ + u8 reserved[3]; + struct sta_id_modify sta; + struct iwl4965_keyinfo key; + __le32 station_flags; /* STA_FLG_* */ + __le32 station_flags_msk; /* STA_FLG_* */ + + /* bit field to disable (1) or enable (0) Tx for Traffic ID (TID) + * corresponding to bit (e.g. bit 5 controls TID 5). + * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */ + __le16 tid_disable_tx; + + __le16 rate_n_flags; /* 3945 only */ + + /* TID for which to add block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + u8 add_immediate_ba_tid; + + /* TID for which to remove block-ack support. + * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */ + u8 remove_immediate_ba_tid; + + /* Starting Sequence Number for added block-ack support. + * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ + __le16 add_immediate_ba_ssn; + + /* + * Number of packets OK to transmit to station even though + * it is asleep -- used to synchronise PS-poll and u-APSD + * responses while ucode keeps track of STA sleep state. 
+ */ + __le16 sleep_tx_count; + + __le16 reserved2; +} __packed; + + +#define ADD_STA_SUCCESS_MSK 0x1 +#define ADD_STA_NO_ROOM_IN_TABLE 0x2 +#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4 +#define ADD_STA_MODIFY_NON_EXIST_STA 0x8 +/* + * REPLY_ADD_STA = 0x18 (response) + */ +struct iwl_add_sta_resp { + u8 status; /* ADD_STA_* */ +} __packed; + +#define REM_STA_SUCCESS_MSK 0x1 +/* + * REPLY_REM_STA = 0x19 (response) + */ +struct iwl_rem_sta_resp { + u8 status; +} __packed; + +/* + * REPLY_REM_STA = 0x19 (command) + */ +struct iwl_rem_sta_cmd { + u8 num_sta; /* number of removed stations */ + u8 reserved[3]; + u8 addr[ETH_ALEN]; /* MAC addr of the first station */ + u8 reserved2[2]; +} __packed; + +#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0)) +#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1)) +#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2)) +#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3)) +#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) + +#define IWL_DROP_SINGLE 0 +#define IWL_DROP_SELECTED 1 +#define IWL_DROP_ALL 2 + +/* + * REPLY_WEP_KEY = 0x20 + */ +struct iwl_wep_key { + u8 key_index; + u8 key_offset; + u8 reserved1[2]; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +} __packed; + +struct iwl_wep_cmd { + u8 num_keys; + u8 global_key_type; + u8 flags; + u8 reserved; + struct iwl_wep_key key[0]; +} __packed; + +#define WEP_KEY_WEP_TYPE 1 +#define WEP_KEYS_MAX 4 +#define WEP_INVALID_OFFSET 0xff +#define WEP_KEY_LEN_64 5 +#define WEP_KEY_LEN_128 13 + +/****************************************************************************** + * (4) + * Rx Responses: + * + *****************************************************************************/ + +#define RX_RES_STATUS_NO_CRC32_ERROR cpu_to_le32(1 << 0) +#define RX_RES_STATUS_NO_RXE_OVERFLOW cpu_to_le32(1 << 1) + +#define RX_RES_PHY_FLAGS_BAND_24_MSK cpu_to_le16(1 << 0) +#define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) +#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) +#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) +#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 +#define RX_RES_PHY_FLAGS_ANTENNA_POS 4 + +#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) +#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) +#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) +#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) +#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) +#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8) + +#define RX_RES_STATUS_STATION_FOUND (1<<6) +#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7) + +#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) +#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) +#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) +#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) +#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11) + +#define RX_MPDU_RES_STATUS_ICV_OK (0x20) +#define RX_MPDU_RES_STATUS_MIC_OK (0x40) +#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7) +#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800) + + +struct iwl3945_rx_frame_stats { + u8 phy_count; + u8 id; + u8 rssi; + u8 agc; + __le16 sig_avg; + __le16 noise_diff; + u8 payload[0]; +} __packed; + +struct iwl3945_rx_frame_hdr { + __le16 channel; + __le16 phy_flags; + u8 reserved1; + u8 rate; + __le16 len; + u8 payload[0]; +} __packed; + +struct iwl3945_rx_frame_end { + __le32 status; + __le64 timestamp; + __le32 beacon_timestamp; +} __packed; + +/* + * REPLY_3945_RX = 0x1b (response only, not a command) + * + * NOTE: DO NOT dereference from casts to this structure + * It is provided only for calculating minimum data set size. 
+ * The actual offsets of the hdr and end are dynamic based on + * stats.phy_count + */ +struct iwl3945_rx_frame { + struct iwl3945_rx_frame_stats stats; + struct iwl3945_rx_frame_hdr hdr; + struct iwl3945_rx_frame_end end; +} __packed; + +#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame)) + +/* Fixed (non-configurable) rx data from phy */ + +#define IWL49_RX_RES_PHY_CNT 14 +#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4) +#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70) +#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */ +#define IWL49_AGC_DB_POS (7) +struct iwl4965_rx_non_cfg_phy { + __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */ + __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */ + u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */ + u8 pad[0]; +} __packed; + + +/* + * REPLY_RX = 0xc3 (response only, not a command) + * Used only for legacy (non 11n) frames. + */ +struct iwl_rx_phy_res { + u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */ + u8 cfg_phy_cnt; /* configurable DSP phy data byte count */ + u8 stat_id; /* configurable DSP phy data set ID */ + u8 reserved1; + __le64 timestamp; /* TSF at on air rise */ + __le32 beacon_time_stamp; /* beacon at on-air rise */ + __le16 phy_flags; /* general phy flags: band, modulation, ... */ + __le16 channel; /* channel number */ + u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */ + __le32 rate_n_flags; /* RATE_MCS_* */ + __le16 byte_count; /* frame's byte-count */ + __le16 frame_time; /* frame's time on the air */ +} __packed; + +struct iwl_rx_mpdu_res_start { + __le16 byte_count; + __le16 reserved; +} __packed; + + +/****************************************************************************** + * (5) + * Tx Commands & Responses: + * + * Driver must place each REPLY_TX command into one of the prioritized Tx + * queues in host DRAM, shared between driver and device (see comments for + * SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode + * are preparing to transmit, the device pulls the Tx command over the PCI + * bus via one of the device's Tx DMA channels, to fill an internal FIFO + * from which data will be transmitted. + * + * uCode handles all timing and protocol related to control frames + * (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler + * handle reception of block-acks; uCode updates the host driver via + * REPLY_COMPRESSED_BA. + * + * uCode handles retrying Tx when an ACK is expected but not received. + * This includes trying lower data rates than the one requested in the Tx + * command, as set up by the REPLY_RATE_SCALE (for 3945) or + * REPLY_TX_LINK_QUALITY_CMD (4965). + * + * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD. + * This command must be executed after every RXON command, before Tx can occur. + *****************************************************************************/ + +/* REPLY_TX Tx flags field */ + +/* + * 1: Use Request-To-Send protocol before this frame. + * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK. + */ +#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1) + +/* + * 1: Transmit Clear-To-Send to self before this frame. + * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames. + * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK. 
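Stepping back briefly to the 4965 Rx PHY data defined earlier in this section: the agc_info field of struct iwl4965_rx_non_cfg_phy packs the AGC value in bits 7:13, and rssi_info carries per-chain RSSI in its even bytes. A sketch of pulling those out follows; the sample values are invented, host-order integers replace the __le16 field, and the calibration offset the driver applies before reporting a signal level is omitted.

#include <stdio.h>

#define IWL49_AGC_DB_MASK   0x3f80  /* bits 7:13 of agc_info */
#define IWL49_AGC_DB_POS    7

int main(void)
{
    unsigned agc_info = 0x1e85;                           /* made-up sample */
    unsigned char rssi_info[6] = { 90, 0, 97, 0, 0, 0 };  /* chains A/B/C in bytes 0/2/4 */
    unsigned agc, max_rssi, i;

    agc = (agc_info & IWL49_AGC_DB_MASK) >> IWL49_AGC_DB_POS;
    max_rssi = 0;
    for (i = 0; i < 6; i += 2)
        if (rssi_info[i] > max_rssi)
            max_rssi = rssi_info[i];

    printf("agc %u dB, strongest chain rssi %u\n", agc, max_rssi);
    return 0;
}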
+ */ +#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2) + +/* 1: Expect ACK from receiving station + * 0: Don't expect ACK (MAC header's duration field s/b 0) + * Set this for unicast frames, but not broadcast/multicast. */ +#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3) + +/* For 4965 devices: + * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD). + * Tx command's initial_rate_index indicates first rate to try; + * uCode walks through table for additional Tx attempts. + * 0: Use Tx rate/MCS from Tx command's rate_n_flags field. + * This rate will be used for all Tx attempts; it will not be scaled. */ +#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4) + +/* 1: Expect immediate block-ack. + * Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */ +#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6) + +/* + * 1: Frame requires full Tx-Op protection. + * Set this if either RTS or CTS Tx Flag gets set. + */ +#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7) + +/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices. + * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */ +#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00) +#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8) +#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9) + +/* 1: uCode overrides sequence control field in MAC header. + * 0: Driver provides sequence control field in MAC header. + * Set this for management frames, non-QOS data frames, non-unicast frames, + * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */ +#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13) + +/* 1: This frame is non-last MPDU; more fragments are coming. + * 0: Last fragment, or not using fragmentation. */ +#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14) + +/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame. + * 0: No TSF required in outgoing frame. + * Set this for transmitting beacons and probe responses. */ +#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16) + +/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword + * alignment of frame's payload data field. + * 0: No pad + * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4 + * field (but not both). Driver must align frame data (i.e. data following + * MAC header) to DWORD boundary. */ +#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20) + +/* accelerate aggregation support + * 0 - no CCMP encryption; 1 - CCMP encryption */ +#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22) + +/* HCCA-AP - disable duration overwriting. */ +#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25) + + +/* + * TX command security control + */ +#define TX_CMD_SEC_WEP 0x01 +#define TX_CMD_SEC_CCM 0x02 +#define TX_CMD_SEC_TKIP 0x03 +#define TX_CMD_SEC_MSK 0x03 +#define TX_CMD_SEC_SHIFT 6 +#define TX_CMD_SEC_KEY128 0x08 + +/* + * security overhead sizes + */ +#define WEP_IV_LEN 4 +#define WEP_ICV_LEN 4 +#define CCMP_MIC_LEN 8 +#define TKIP_ICV_LEN 4 + +/* + * REPLY_TX = 0x1c (command) + */ + +struct iwl3945_tx_cmd { + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. 
+ * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + u8 rate; + + /* Index of recipient station in uCode's station table */ + u8 sta_id; + u8 tid_tspec; + u8 sec_ctl; + u8 key[16]; + union { + u8 byte[8]; + __le16 word[4]; + __le32 dw[2]; + } tkip_mic; + __le32 next_frame_info; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + u8 supp_rates[2]; + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). + */ + __le16 driver_txop; + + /* + * MAC header goes here, followed by 2 bytes padding if MAC header + * length is 26 or 30 bytes, followed by payload data + */ + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __packed; + +/* + * REPLY_TX = 0x1c (response) + */ +struct iwl3945_tx_resp { + u8 failure_rts; + u8 failure_frame; + u8 bt_kill_count; + u8 rate; + __le32 wireless_media_time; + __le32 status; /* TX status */ +} __packed; + + +/* + * 4965 uCode updates these Tx attempt count values in host DRAM. + * Used for managing Tx retries when expecting block-acks. + * Driver should set these fields to 0. + */ +struct iwl_dram_scratch { + u8 try_cnt; /* Tx attempts */ + u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */ + __le16 reserved; +} __packed; + +struct iwl_tx_cmd { + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. + * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + /* uCode may modify this field of the Tx command (in host DRAM!). + * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ + struct iwl_dram_scratch scratch; + + /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* Index of destination station in uCode's station table */ + u8 sta_id; + + /* Type of security encryption: CCM or TKIP */ + u8 sec_ctl; /* TX_CMD_SEC_* */ + + /* + * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial + * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for + * data frames, this field may be used to selectively reduce initial + * rate (via non-0 value) for special frames (e.g. management), while + * still supporting rate scaling for all frames. + */ + u8 initial_rate_index; + u8 reserved; + u8 key[16]; + __le16 next_frame_flags; + __le16 reserved2; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + + /* Host DRAM physical address pointer to "scratch" in this command. + * Must be dword aligned. "0" in dram_lsb_ptr disables usage. 
*/ + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + u8 tid_tspec; + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). + */ + __le16 driver_txop; + + /* + * MAC header goes here, followed by 2 bytes padding if MAC header + * length is 26 or 30 bytes, followed by payload data + */ + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __packed; + +/* TX command response is sent after *3945* transmission attempts. + * + * NOTES: + * + * TX_STATUS_FAIL_NEXT_FRAG + * + * If the fragment flag in the MAC header for the frame being transmitted + * is set and there is insufficient time to transmit the next frame, the + * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'. + * + * TX_STATUS_FIFO_UNDERRUN + * + * Indicates the host did not provide bytes to the FIFO fast enough while + * a TX was in progress. + * + * TX_STATUS_FAIL_MGMNT_ABORT + * + * This status is only possible if the ABORT ON MGMT RX parameter was + * set to true with the TX command. + * + * If the MSB of the status parameter is set then an abort sequence is + * required. This sequence consists of the host activating the TX Abort + * control line, and then waiting for the TX Abort command response. This + * indicates that a the device is no longer in a transmit state, and that the + * command FIFO has been cleared. The host must then deactivate the TX Abort + * control line. Receiving is still allowed in this case. + */ +enum { + TX_3945_STATUS_SUCCESS = 0x01, + TX_3945_STATUS_DIRECT_DONE = 0x02, + TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84, + TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85, + TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86, + TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_3945_STATUS_FAIL_DEST_PS = 0x88, + TX_3945_STATUS_FAIL_ABORTED = 0x89, + TX_3945_STATUS_FAIL_BT_RETRY = 0x8a, + TX_3945_STATUS_FAIL_STA_INVALID = 0x8b, + TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e, + TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, + TX_3945_STATUS_FAIL_TX_LOCKED = 0x90, + TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, +}; + +/* + * TX command response is sent after *4965* transmission attempts. + * + * both postpone and abort status are expected behavior from uCode. 
there is + * no special operation required from driver; except for RFKILL_FLUSH, + * which required tx flush host command to flush all the tx frames in queues + */ +enum { + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + /* postpone TX */ + TX_STATUS_POSTPONE_DELAY = 0x40, + TX_STATUS_POSTPONE_FEW_BYTES = 0x41, + TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, + TX_STATUS_POSTPONE_CALC_TTAK = 0x44, + /* abort TX */ + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, + TX_STATUS_FAIL_DRAIN_FLOW = 0x85, + TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_HOST_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, + TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, + TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90, + TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, +}; + +#define TX_PACKET_MODE_REGULAR 0x0000 +#define TX_PACKET_MODE_BURST_SEQ 0x0100 +#define TX_PACKET_MODE_BURST_FIRST 0x0200 + +enum { + TX_POWER_PA_NOT_ACTIVE = 0x0, +}; + +enum { + TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */ + TX_STATUS_DELAY_MSK = 0x00000040, + TX_STATUS_ABORT_MSK = 0x00000080, + TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */ + TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */ + TX_RESERVED = 0x00780000, /* bits 19:22 */ + TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */ + TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ +}; + +/* ******************************* + * TX aggregation status + ******************************* */ + +enum { + AGG_TX_STATE_TRANSMITTED = 0x00, + AGG_TX_STATE_UNDERRUN_MSK = 0x01, + AGG_TX_STATE_FEW_BYTES_MSK = 0x04, + AGG_TX_STATE_ABORT_MSK = 0x08, + AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10, + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20, + AGG_TX_STATE_SCD_QUERY_MSK = 0x80, + AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100, + AGG_TX_STATE_RESPONSE_MSK = 0x1ff, + AGG_TX_STATE_DUMP_TX_MSK = 0x200, + AGG_TX_STATE_DELAY_TX_MSK = 0x400 +}; + +#define AGG_TX_STATUS_MSK 0x00000fff /* bits 0:11 */ +#define AGG_TX_TRY_MSK 0x0000f000 /* bits 12:15 */ + +#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL_MSK | \ + AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK) + +/* # tx attempts for first frame in aggregation */ +#define AGG_TX_STATE_TRY_CNT_POS 12 +#define AGG_TX_STATE_TRY_CNT_MSK 0xf000 + +/* Command ID and sequence number of Tx command for this frame */ +#define AGG_TX_STATE_SEQ_NUM_POS 16 +#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000 + +/* + * REPLY_TX = 0x1c (response) + * + * This response may be in one of two slightly different formats, indicated + * by the frame_count field: + * + * 1) No aggregation (frame_count == 1). This reports Tx results for + * a single frame. Multiple attempts, at various bit rates, may have + * been made for this frame. + * + * 2) Aggregation (frame_count > 1). This reports Tx results for + * 2 or more frames that used block-acknowledge. All frames were + * transmitted at same rate. Rate scaling may have been used if first + * frame in this new agg block failed in previous agg block(s). + * + * Note that, for aggregation, ACK (block-ack) status is not delivered here; + * block-ack has not been received by the time the 4965 device records + * this status. 
+ * This status relates to reasons the tx might have been blocked or aborted + * within the sending station (this 4965 device), rather than whether it was + * received successfully by the destination station. + */ +struct agg_tx_status { + __le16 status; + __le16 sequence; +} __packed; + +struct iwl4965_tx_resp { + u8 frame_count; /* 1 no aggregation, >1 aggregation */ + u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */ + u8 failure_rts; /* # failures due to unsuccessful RTS */ + u8 failure_frame; /* # failures due to no ACK (unused for agg) */ + + /* For non-agg: Rate at which frame was successful. + * For agg: Rate at which all frames were transmitted. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* For non-agg: RTS + CTS + frame tx attempts time + ACK. + * For agg: RTS + CTS + aggregation tx time + block-ack time. */ + __le16 wireless_media_time; /* uSecs */ + + __le16 reserved; + __le32 pa_power1; /* RF power amplifier measurement (not used) */ + __le32 pa_power2; + + /* + * For non-agg: frame status TX_STATUS_* + * For agg: status of 1st frame, AGG_TX_STATE_*; other frame status + * fields follow this one, up to frame_count. + * Bit fields: + * 11- 0: AGG_TX_STATE_* status code + * 15-12: Retry count for 1st frame in aggregation (retries + * occur if tx failed for this frame when it was a + * member of a previous aggregation block). If rate + * scaling is used, retry count indicates the rate + * table entry used for all frames in the new agg. + * 31-16: Sequence # for this frame's Tx cmd (not SSN!) + */ + union { + __le32 status; + struct agg_tx_status agg_status[0]; /* for each agg frame */ + } u; +} __packed; + +/* + * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) + * + * Reports Block-Acknowledge from recipient station + */ +struct iwl_compressed_ba_resp { + __le32 sta_addr_lo32; + __le16 sta_addr_hi16; + __le16 reserved; + + /* Index of recipient (BA-sending) station in uCode's station table */ + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; +} __packed; + +/* + * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response) + * + * See details under "TXPOWER" in iwl-4965-hw.h. + */ + +struct iwl3945_txpowertable_cmd { + u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ + u8 reserved; + __le16 channel; + struct iwl3945_power_per_rate power[IWL_MAX_RATES]; +} __packed; + +struct iwl4965_txpowertable_cmd { + u8 band; /* 0: 5 GHz, 1: 2.4 GHz */ + u8 reserved; + __le16 channel; + struct iwl4965_tx_power_db tx_power; +} __packed; + + +/** + * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response + * + * REPLY_RATE_SCALE = 0x47 (command, has simple generic response) + * + * NOTE: The table of rates passed to the uCode via the + * RATE_SCALE command sets up the corresponding order of + * rates used for all related commands, including rate + * masks, etc. 
+ * + * For example, if you set 9MB (PLCP 0x0f) as the first + * rate in the rate table, the bit mask for that rate + * when passed through ofdm_basic_rates on the REPLY_RXON + * command would be bit 0 (1 << 0) + */ +struct iwl3945_rate_scaling_info { + __le16 rate_n_flags; + u8 try_cnt; + u8 next_rate_index; +} __packed; + +struct iwl3945_rate_scaling_cmd { + u8 table_id; + u8 reserved[3]; + struct iwl3945_rate_scaling_info table[IWL_MAX_RATES]; +} __packed; + + +/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */ +#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0) + +/* # of EDCA prioritized tx fifos */ +#define LINK_QUAL_AC_NUM AC_NUM + +/* # entries in rate scale table to support Tx retries */ +#define LINK_QUAL_MAX_RETRY_NUM 16 + +/* Tx antenna selection values */ +#define LINK_QUAL_ANT_A_MSK (1 << 0) +#define LINK_QUAL_ANT_B_MSK (1 << 1) +#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK) + + +/** + * struct iwl_link_qual_general_params + * + * Used in REPLY_TX_LINK_QUALITY_CMD + */ +struct iwl_link_qual_general_params { + u8 flags; + + /* No entries at or above this (driver chosen) index contain MIMO */ + u8 mimo_delimiter; + + /* Best single antenna to use for single stream (legacy, SISO). */ + u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ + + /* Best antennas to use for MIMO (unused for 4965, assumes both). */ + u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ + + /* + * If driver needs to use different initial rates for different + * EDCA QOS access categories (as implemented by tx fifos 0-3), + * this table will set that up, by indicating the indexes in the + * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start. + * Otherwise, driver should set all entries to 0. + * + * Entry usage: + * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice + * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3. + */ + u8 start_rate_index[LINK_QUAL_AC_NUM]; +} __packed; + +#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ +#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) +#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100) + +#define LINK_QUAL_AGG_DISABLE_START_DEF (3) +#define LINK_QUAL_AGG_DISABLE_START_MAX (255) +#define LINK_QUAL_AGG_DISABLE_START_MIN (0) + +#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (31) +#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) +#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) + +/** + * struct iwl_link_qual_agg_params + * + * Used in REPLY_TX_LINK_QUALITY_CMD + */ +struct iwl_link_qual_agg_params { + + /* + *Maximum number of uSec in aggregation. + * default set to 4000 (4 milliseconds) if not configured in .cfg + */ + __le16 agg_time_limit; + + /* + * Number of Tx retries allowed for a frame, before that frame will + * no longer be considered for the start of an aggregation sequence + * (scheduler will then try to tx it as single frame). + * Driver should set this to 3. + */ + u8 agg_dis_start_th; + + /* + * Maximum number of frames in aggregation. + * 0 = no limit (default). 1 = no aggregation. + * Other values = max # frames in aggregation. + */ + u8 agg_frame_cnt_limit; + + __le32 reserved; +} __packed; + +/* + * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response) + * + * For 4965 devices only; 3945 uses REPLY_RATE_SCALE. + * + * Each station in the 4965 device's internal station table has its own table + * of 16 + * Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when + * an ACK is not received. This command replaces the entire table for + * one station. 
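As a short aside on the aggregation limits that ride along in this command: the sketch below fills the fields of struct iwl_link_qual_agg_params, described above, with the *_DEF values defined a few lines earlier (4000 usec aggregation time limit, start threshold 3, 31-frame limit). A trimmed host-order struct stands in for the real __le16/__le32 layout; the values are only the documented defaults, not a recommendation.

#include <stdio.h>

#define LINK_QUAL_AGG_TIME_LIMIT_DEF     4000  /* usec */
#define LINK_QUAL_AGG_DISABLE_START_DEF  3
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF    31

struct agg_params_sketch {
    unsigned agg_time_limit;       /* max usec spent in one aggregate */
    unsigned agg_dis_start_th;     /* retries before a frame is sent singly */
    unsigned agg_frame_cnt_limit;  /* max frames per aggregate */
};

int main(void)
{
    struct agg_params_sketch p = {
        .agg_time_limit      = LINK_QUAL_AGG_TIME_LIMIT_DEF,
        .agg_dis_start_th    = LINK_QUAL_AGG_DISABLE_START_DEF,
        .agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF,
    };

    printf("agg: %u usec limit, start threshold %u, frame limit %u\n",
           p.agg_time_limit, p.agg_dis_start_th, p.agg_frame_cnt_limit);
    return 0;
}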
+ * + * NOTE: Station must already be in 4965 device's station table. + * Use REPLY_ADD_STA. + * + * The rate scaling procedures described below work well. Of course, other + * procedures are possible, and may work better for particular environments. + * + * + * FILLING THE RATE TABLE + * + * Given a particular initial rate and mode, as determined by the rate + * scaling algorithm described below, the Linux driver uses the following + * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the + * Link Quality command: + * + * + * 1) If using High-throughput (HT) (SISO or MIMO) initial rate: + * a) Use this same initial rate for first 3 entries. + * b) Find next lower available rate using same mode (SISO or MIMO), + * use for next 3 entries. If no lower rate available, switch to + * legacy mode (no HT40 channel, no MIMO, no short guard interval). + * c) If using MIMO, set command's mimo_delimiter to number of entries + * using MIMO (3 or 6). + * d) After trying 2 HT rates, switch to legacy mode (no HT40 channel, + * no MIMO, no short guard interval), at the next lower bit rate + * (e.g. if second HT bit rate was 54, try 48 legacy), and follow + * legacy procedure for remaining table entries. + * + * 2) If using legacy initial rate: + * a) Use the initial rate for only one entry. + * b) For each following entry, reduce the rate to next lower available + * rate, until reaching the lowest available rate. + * c) When reducing rate, also switch antenna selection. + * d) Once lowest available rate is reached, repeat this rate until + * rate table is filled (16 entries), switching antenna each entry. + * + * + * ACCUMULATING HISTORY + * + * The rate scaling algorithm for 4965 devices, as implemented in Linux driver, + * uses two sets of frame Tx success history: One for the current/active + * modulation mode, and one for a speculative/search mode that is being + * attempted. If the speculative mode turns out to be more effective (i.e. + * actual transfer rate is better), then the driver continues to use the + * speculative mode as the new current active mode. + * + * Each history set contains, separately for each possible rate, data for a + * sliding window of the 62 most recent tx attempts at that rate. The data + * includes a shifting bitmap of success(1)/failure(0), and sums of successful + * and attempted frames, from which the driver can additionally calculate a + * success ratio (success / attempted) and number of failures + * (attempted - success), and control the size of the window (attempted). + * The driver uses the bit map to remove successes from the success sum, as + * the oldest tx attempts fall out of the window. + * + * When the 4965 device makes multiple tx attempts for a given frame, each + * attempt might be at a different rate, and have different modulation + * characteristics (e.g. antenna, fat channel, short guard interval), as set + * up in the rate scaling table in the Link Quality command. The driver must + * determine which rate table entry was used for each tx attempt, to determine + * which rate-specific history to update, and record only those attempts that + * match the modulation characteristics of the history set. + * + * When using block-ack (aggregation), all frames are transmitted at the same + * rate, since there is no per-attempt acknowledgment from the destination + * station. The Tx response struct iwl_tx_resp indicates the Tx rate in + * rate_n_flags field. 
After receiving a block-ack, the driver can update + * history for the entire block all at once. + * + * + * FINDING BEST STARTING RATE: + * + * When working with a selected initial modulation mode (see below), the + * driver attempts to find a best initial rate. The initial rate is the + * first entry in the Link Quality command's rate table. + * + * 1) Calculate actual throughput (success ratio * expected throughput, see + * table below) for current initial rate. Do this only if enough frames + * have been attempted to make the value meaningful: at least 6 failed + * tx attempts, or at least 8 successes. If not enough, don't try rate + * scaling yet. + * + * 2) Find available rates adjacent to current initial rate. Available means: + * a) supported by hardware && + * b) supported by association && + * c) within any constraints selected by user + * + * 3) Gather measured throughputs for adjacent rates. These might not have + * enough history to calculate a throughput. That's okay, we might try + * using one of them anyway! + * + * 4) Try decreasing rate if, for current rate: + * a) success ratio is < 15% || + * b) lower adjacent rate has better measured throughput || + * c) higher adjacent rate has worse throughput, and lower is unmeasured + * + * As a sanity check, if decrease was determined above, leave rate + * unchanged if: + * a) lower rate unavailable + * b) success ratio at current rate > 85% (very good) + * c) current measured throughput is better than expected throughput + * of lower rate (under perfect 100% tx conditions, see table below) + * + * 5) Try increasing rate if, for current rate: + * a) success ratio is < 15% || + * b) both adjacent rates' throughputs are unmeasured (try it!) || + * b) higher adjacent rate has better measured throughput || + * c) lower adjacent rate has worse throughput, and higher is unmeasured + * + * As a sanity check, if increase was determined above, leave rate + * unchanged if: + * a) success ratio at current rate < 70%. This is not particularly + * good performance; higher rate is sure to have poorer success. + * + * 6) Re-evaluate the rate after each tx frame. If working with block- + * acknowledge, history and statistics may be calculated for the entire + * block (including prior history that fits within the history windows), + * before re-evaluation. + * + * FINDING BEST STARTING MODULATION MODE: + * + * After working with a modulation mode for a "while" (and doing rate scaling), + * the driver searches for a new initial mode in an attempt to improve + * throughput. The "while" is measured by numbers of attempted frames: + * + * For legacy mode, search for new mode after: + * 480 successful frames, or 160 failed frames + * For high-throughput modes (SISO or MIMO), search for new mode after: + * 4500 successful frames, or 400 failed frames + * + * Mode switch possibilities are (3 for each mode): + * + * For legacy: + * Change antenna, try SISO (if HT association), try MIMO (if HT association) + * For SISO: + * Change antenna, try MIMO, try shortened guard interval (SGI) + * For MIMO: + * Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI) + * + * When trying a new mode, use the same bit rate as the old/current mode when + * trying antenna switches and shortened guard interval. 
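The bookkeeping described above lends itself to a short numeric sketch: a sliding window of recent attempts per rate (62 deep in the driver), a success ratio derived from it, and the "actual throughput = expected throughput x success ratio" estimate used when comparing rates and modes. The sample counts below are invented; the expected-throughput figure for one rate is taken from the 100%-success table that follows.

#include <stdio.h>

#define WINDOW_SIZE 62  /* per-rate history depth used by the driver */

int main(void)
{
    /* Invented sample: 40 attempts at SISO 20MHz 54 Mbps, 30 succeeded */
    unsigned attempted = 40, succeeded = 30;
    unsigned expected_tput = 193;   /* from the expected-throughput table below */
    unsigned success_pct, actual_tput;

    if (attempted > WINDOW_SIZE)
        attempted = WINDOW_SIZE;    /* old attempts fall out of the window */

    success_pct = (succeeded * 100) / attempted;
    actual_tput = (expected_tput * success_pct) / 100;

    /* The driver only trusts these numbers once it has seen at least
     * 6 failures or 8 successes at this rate. */
    printf("success %u%%, estimated throughput %u\n", success_pct, actual_tput);
    return 0;
}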
When switching to + * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate + * for which the expected throughput (under perfect conditions) is about the + * same or slightly better than the actual measured throughput delivered by + * the old/current mode. + * + * Actual throughput can be estimated by multiplying the expected throughput + * by the success ratio (successful / attempted tx frames). Frame size is + * not considered in this calculation; it assumes that frame size will average + * out to be fairly consistent over several samples. The following are + * metric values for expected throughput assuming 100% success ratio. + * Only G band has support for CCK rates: + * + * RATE: 1 2 5 11 6 9 12 18 24 36 48 54 60 + * + * G: 7 13 35 58 40 57 72 98 121 154 177 186 186 + * A: 0 0 0 0 40 57 72 98 121 154 177 186 186 + * SISO 20MHz: 0 0 0 0 42 42 76 102 124 159 183 193 202 + * SGI SISO 20MHz: 0 0 0 0 46 46 82 110 132 168 192 202 211 + * MIMO 20MHz: 0 0 0 0 74 74 123 155 179 214 236 244 251 + * SGI MIMO 20MHz: 0 0 0 0 81 81 131 164 188 222 243 251 257 + * SISO 40MHz: 0 0 0 0 77 77 127 160 184 220 242 250 257 + * SGI SISO 40MHz: 0 0 0 0 83 83 135 169 193 229 250 257 264 + * MIMO 40MHz: 0 0 0 0 123 123 182 214 235 264 279 285 289 + * SGI MIMO 40MHz: 0 0 0 0 131 131 191 222 242 270 284 289 293 + * + * After the new mode has been tried for a short while (minimum of 6 failed + * frames or 8 successful frames), compare success ratio and actual throughput + * estimate of the new mode with the old. If either is better with the new + * mode, continue to use the new mode. + * + * Continue comparing modes until all 3 possibilities have been tried. + * If moving from legacy to HT, try all 3 possibilities from the new HT + * mode. After trying all 3, a best mode is found. Continue to use this mode + * for the longer "while" described above (e.g. 480 successful frames for + * legacy), and then repeat the search process. + * + */ +struct iwl_link_quality_cmd { + + /* Index of destination/recipient station in uCode's station table */ + u8 sta_id; + u8 reserved1; + __le16 control; /* not used */ + struct iwl_link_qual_general_params general_params; + struct iwl_link_qual_agg_params agg_params; + + /* + * Rate info; when using rate-scaling, Tx command's initial_rate_index + * specifies 1st Tx rate attempted, via index into this table. + * 4965 devices works its way through table when retrying Tx. + */ + struct { + __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */ + } rs_table[LINK_QUAL_MAX_RETRY_NUM]; + __le32 reserved2; +} __packed; + +/* + * BT configuration enable flags: + * bit 0 - 1: BT channel announcement enabled + * 0: disable + * bit 1 - 1: priority of BT device enabled + * 0: disable + */ +#define BT_COEX_DISABLE (0x0) +#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0) +#define BT_ENABLE_PRIORITY BIT(1) + +#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY) + +#define BT_LEAD_TIME_DEF (0x1E) + +#define BT_MAX_KILL_DEF (0x5) + +/* + * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) + * + * 3945 and 4965 devices support hardware handshake with Bluetooth device on + * same platform. Bluetooth device alerts wireless device when it will Tx; + * wireless device can delay or kill its own Tx to accommodate. 
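+ *
+ * For illustration only (this example is not part of the original
+ * comment), enabling coexistence with the default timing values defined
+ * above could look like the following driver-side sketch; it assumes the
+ * iwl_legacy_send_cmd_pdu() helper used elsewhere in this patch:
+ *
+ *	struct iwl_bt_cmd bt_cmd = {
+ *		.flags = BT_COEX_ENABLE,
+ *		.lead_time = BT_LEAD_TIME_DEF,
+ *		.max_kill = BT_MAX_KILL_DEF,
+ *		.kill_ack_mask = cpu_to_le32(0),
+ *		.kill_cts_mask = cpu_to_le32(0),
+ *	};
+ *
+ *	iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+ *				sizeof(bt_cmd), &bt_cmd);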
+ */ +struct iwl_bt_cmd { + u8 flags; + u8 lead_time; + u8 max_kill; + u8 reserved; + __le32 kill_ack_mask; + __le32 kill_cts_mask; +} __packed; + + +/****************************************************************************** + * (6) + * Spectrum Management (802.11h) Commands, Responses, Notifications: + * + *****************************************************************************/ + +/* + * Spectrum Management + */ +#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK | \ + RXON_FILTER_CTL2HOST_MSK | \ + RXON_FILTER_ACCEPT_GRP_MSK | \ + RXON_FILTER_DIS_DECRYPT_MSK | \ + RXON_FILTER_DIS_GRP_DECRYPT_MSK | \ + RXON_FILTER_ASSOC_MSK | \ + RXON_FILTER_BCON_AWARE_MSK) + +struct iwl_measure_channel { + __le32 duration; /* measurement duration in extended beacon + * format */ + u8 channel; /* channel to measure */ + u8 type; /* see enum iwl_measure_type */ + __le16 reserved; +} __packed; + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command) + */ +struct iwl_spectrum_cmd { + __le16 len; /* number of bytes starting from token */ + u8 token; /* token id */ + u8 id; /* measurement id -- 0 or 1 */ + u8 origin; /* 0 = TGh, 1 = other, 2 = TGk */ + u8 periodic; /* 1 = periodic */ + __le16 path_loss_timeout; + __le32 start_time; /* start time in extended beacon format */ + __le32 reserved2; + __le32 flags; /* rxon flags */ + __le32 filter_flags; /* rxon filter flags */ + __le16 channel_count; /* minimum 1, maximum 10 */ + __le16 reserved3; + struct iwl_measure_channel channels[10]; +} __packed; + +/* + * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response) + */ +struct iwl_spectrum_resp { + u8 token; + u8 id; /* id of the prior command replaced, or 0xff */ + __le16 status; /* 0 - command will be handled + * 1 - cannot handle (conflicts with another + * measurement) */ +} __packed; + +enum iwl_measurement_state { + IWL_MEASUREMENT_START = 0, + IWL_MEASUREMENT_STOP = 1, +}; + +enum iwl_measurement_status { + IWL_MEASUREMENT_OK = 0, + IWL_MEASUREMENT_CONCURRENT = 1, + IWL_MEASUREMENT_CSA_CONFLICT = 2, + IWL_MEASUREMENT_TGH_CONFLICT = 3, + /* 4-5 reserved */ + IWL_MEASUREMENT_STOPPED = 6, + IWL_MEASUREMENT_TIMEOUT = 7, + IWL_MEASUREMENT_PERIODIC_FAILED = 8, +}; + +#define NUM_ELEMENTS_IN_HISTOGRAM 8 + +struct iwl_measurement_histogram { + __le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */ + __le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */ +} __packed; + +/* clear channel availability counters */ +struct iwl_measurement_cca_counters { + __le32 ofdm; + __le32 cck; +} __packed; + +enum iwl_measure_type { + IWL_MEASURE_BASIC = (1 << 0), + IWL_MEASURE_CHANNEL_LOAD = (1 << 1), + IWL_MEASURE_HISTOGRAM_RPI = (1 << 2), + IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3), + IWL_MEASURE_FRAME = (1 << 4), + /* bits 5:6 are reserved */ + IWL_MEASURE_IDLE = (1 << 7), +}; + +/* + * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command) + */ +struct iwl_spectrum_notification { + u8 id; /* measurement id -- 0 or 1 */ + u8 token; + u8 channel_index; /* index in measurement channel list */ + u8 state; /* 0 - start, 1 - stop */ + __le32 start_time; /* lower 32-bits of TSF */ + u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */ + u8 channel; + u8 type; /* see enum iwl_measurement_type */ + u8 reserved1; + /* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only only + * valid if applicable for measurement type requested. 
*/ + __le32 cca_ofdm; /* cca fraction time in 40Mhz clock periods */ + __le32 cca_cck; /* cca fraction time in 44Mhz clock periods */ + __le32 cca_time; /* channel load time in usecs */ + u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 - + * unidentified */ + u8 reserved2[3]; + struct iwl_measurement_histogram histogram; + __le32 stop_time; /* lower 32-bits of TSF */ + __le32 status; /* see iwl_measurement_status */ +} __packed; + +/****************************************************************************** + * (7) + * Power Management Commands, Responses, Notifications: + * + *****************************************************************************/ + +/** + * struct iwl_powertable_cmd - Power Table Command + * @flags: See below: + * + * POWER_TABLE_CMD = 0x77 (command, has simple generic response) + * + * PM allow: + * bit 0 - '0' Driver not allow power management + * '1' Driver allow PM (use rest of parameters) + * + * uCode send sleep notifications: + * bit 1 - '0' Don't send sleep notification + * '1' send sleep notification (SEND_PM_NOTIFICATION) + * + * Sleep over DTIM + * bit 2 - '0' PM have to walk up every DTIM + * '1' PM could sleep over DTIM till listen Interval. + * + * PCI power managed + * bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1) + * '1' !(PCI_CFG_LINK_CTRL & 0x1) + * + * Fast PD + * bit 4 - '1' Put radio to sleep when receiving frame for others + * + * Force sleep Modes + * bit 31/30- '00' use both mac/xtal sleeps + * '01' force Mac sleep + * '10' force xtal sleep + * '11' Illegal set + * + * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then + * ucode assume sleep over DTIM is allowed and we don't need to wake up + * for every DTIM. + */ +#define IWL_POWER_VEC_SIZE 5 + +#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) +#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0)) +#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1)) +#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2)) +#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) +#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4)) +#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5)) +#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6)) +#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7)) + +struct iwl3945_powertable_cmd { + __le16 flags; + u8 reserved[2]; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; +} __packed; + +struct iwl_powertable_cmd { + __le16 flags; + u8 keep_alive_seconds; /* 3945 reserved */ + u8 debug_flags; /* 3945 reserved */ + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 keep_alive_beacons; +} __packed; + +/* + * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command) + * all devices identical. + */ +struct iwl_sleep_notification { + u8 pm_sleep_mode; + u8 pm_wakeup_src; + __le16 reserved; + __le32 sleep_time; + __le32 tsf_low; + __le32 bcon_timer; +} __packed; + +/* Sleep states. all devices identical. 
*/ +enum { + IWL_PM_NO_SLEEP = 0, + IWL_PM_SLP_MAC = 1, + IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2, + IWL_PM_SLP_FULL_MAC_CARD_STATE = 3, + IWL_PM_SLP_PHY = 4, + IWL_PM_SLP_REPENT = 5, + IWL_PM_WAKEUP_BY_TIMER = 6, + IWL_PM_WAKEUP_BY_DRIVER = 7, + IWL_PM_WAKEUP_BY_RFKILL = 8, + /* 3 reserved */ + IWL_PM_NUM_OF_MODES = 12, +}; + +/* + * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command) + */ +struct iwl_card_state_notif { + __le32 flags; +} __packed; + +#define HW_CARD_DISABLED 0x01 +#define SW_CARD_DISABLED 0x02 +#define CT_CARD_DISABLED 0x04 +#define RXON_CARD_DISABLED 0x10 + +struct iwl_ct_kill_config { + __le32 reserved; + __le32 critical_temperature_M; + __le32 critical_temperature_R; +} __packed; + +/****************************************************************************** + * (8) + * Scan Commands, Responses, Notifications: + * + *****************************************************************************/ + +#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0) +#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1) + +/** + * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table + * + * One for each channel in the scan list. + * Each channel can independently select: + * 1) SSID for directed active scans + * 2) Txpower setting (for rate specified within Tx command) + * 3) How long to stay on-channel (behavior may be modified by quiet_time, + * quiet_plcp_th, good_CRC_th) + * + * To avoid uCode errors, make sure the following are true (see comments + * under struct iwl_scan_cmd about max_out_time and quiet_time): + * 1) If using passive_dwell (i.e. passive_dwell != 0): + * active_dwell <= passive_dwell (< max_out_time if max_out_time != 0) + * 2) quiet_time <= active_dwell + * 3) If restricting off-channel time (i.e. max_out_time !=0): + * passive_dwell < max_out_time + * active_dwell < max_out_time + */ +struct iwl3945_scan_channel { + /* + * type is defined as: + * 0:0 1 = active, 0 = passive + * 1:4 SSID direct bit map; if a bit is set, then corresponding + * SSID IE is transmitted in probe request. + * 5:7 reserved + */ + u8 type; + u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */ + struct iwl3945_tx_power tpc; + __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ + __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ +} __packed; + +/* set number of direct probes u8 type */ +#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1)))) + +struct iwl_scan_channel { + /* + * type is defined as: + * 0:0 1 = active, 0 = passive + * 1:20 SSID direct bit map; if a bit is set, then corresponding + * SSID IE is transmitted in probe request. + * 21:31 reserved + */ + __le32 type; + __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */ + u8 tx_gain; /* gain for analog radio */ + u8 dsp_atten; /* gain for DSP */ + __le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */ + __le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */ +} __packed; + +/* set number of direct probes __le32 type */ +#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1)))) + +/** + * struct iwl_ssid_ie - directed scan network information element + * + * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in + * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 (4) entries. + * SSID IEs get transmitted in reverse order of entry. 
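+ *
+ * Worked example (added for clarity, not in the original comment):
+ * IWL_SCAN_PROBE_MASK(2) expands to
+ *
+ *	cpu_to_le32(BIT(2) | (BIT(2) - BIT(1))) = cpu_to_le32(0x4 | 0x2)
+ *	                                        = cpu_to_le32(0x6)
+ *
+ * i.e. bits 1 and 2 of the scan channel's "type" word. Per the bit map
+ * described above (bits 1:20), this directs probe requests for the first
+ * two direct_scan[] entries on that channel.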
+ */ +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[32]; +} __packed; + +#define PROBE_OPTION_MAX_3945 4 +#define PROBE_OPTION_MAX 20 +#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) +#define IWL_GOOD_CRC_TH_DISABLED 0 +#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) +#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff) +#define IWL_MAX_SCAN_SIZE 1024 +#define IWL_MAX_CMD_SIZE 4096 + +/* + * REPLY_SCAN_CMD = 0x80 (command) + * + * The hardware scan command is very powerful; the driver can set it up to + * maintain (relatively) normal network traffic while doing a scan in the + * background. The max_out_time and suspend_time control the ratio of how + * long the device stays on an associated network channel ("service channel") + * vs. how long it's away from the service channel, i.e. tuned to other channels + * for scanning. + * + * max_out_time is the max time off-channel (in usec), and suspend_time + * is how long (in "extended beacon" format) that the scan is "suspended" + * after returning to the service channel. That is, suspend_time is the + * time that we stay on the service channel, doing normal work, between + * scan segments. The driver may set these parameters differently to support + * scanning when associated vs. not associated, and light vs. heavy traffic + * loads when associated. + * + * After receiving this command, the device's scan engine does the following; + * + * 1) Sends SCAN_START notification to driver + * 2) Checks to see if it has time to do scan for one channel + * 3) Sends NULL packet, with power-save (PS) bit set to 1, + * to tell AP that we're going off-channel + * 4) Tunes to first channel in scan list, does active or passive scan + * 5) Sends SCAN_RESULT notification to driver + * 6) Checks to see if it has time to do scan on *next* channel in list + * 7) Repeats 4-6 until it no longer has time to scan the next channel + * before max_out_time expires + * 8) Returns to service channel + * 9) Sends NULL packet with PS=0 to tell AP that we're back + * 10) Stays on service channel until suspend_time expires + * 11) Repeats entire process 2-10 until list is complete + * 12) Sends SCAN_COMPLETE notification + * + * For fast, efficient scans, the scan command also has support for staying on + * a channel for just a short time, if doing active scanning and getting no + * responses to the transmitted probe request. This time is controlled by + * quiet_time, and the number of received packets below which a channel is + * considered "quiet" is controlled by quiet_plcp_threshold. + * + * For active scanning on channels that have regulatory restrictions against + * blindly transmitting, the scan can listen before transmitting, to make sure + * that there is already legitimate activity on the channel. If enough + * packets are cleanly received on the channel (controlled by good_CRC_th, + * typical value 1), the scan engine starts transmitting probe requests. + * + * Driver must use separate scan commands for 2.4 vs. 5 GHz bands. + * + * To avoid uCode errors, see timing restrictions described under + * struct iwl_scan_channel. + */ + +struct iwl3945_scan_cmd { + __le16 len; + u8 reserved0; + u8 channel_count; /* # channels in channel list */ + __le16 quiet_time; /* dwell only this # millisecs on quiet channel + * (only for active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 
1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ + __le16 reserved1; + __le32 max_out_time; /* max usec to be away from associated (service) + * channel */ + __le32 suspend_time; /* pause scan this long (in "extended beacon + * format") when returning to service channel: + * 3945; 31:24 # beacons, 19:0 additional usec, + * 4965; 31:22 # beacons, 21:0 additional usec. + */ + __le32 flags; /* RXON_FLG_* */ + __le32 filter_flags; /* RXON_FILTER_* */ + + /* For active scans (set to all-0s for passive scans). + * Does not include payload. Must specify Tx rate; no rate scaling. */ + struct iwl3945_tx_cmd tx_cmd; + + /* For directed active scans (set to all-0s otherwise) */ + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945]; + + /* + * Probe request frame, followed by channel list. + * + * Size of probe request frame is specified by byte count in tx_cmd. + * Channel list follows immediately after probe request frame. + * Number of channels in list is specified by channel_count. + * Each channel in list is of type: + * + * struct iwl3945_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * before requesting another scan. + */ + u8 data[0]; +} __packed; + +struct iwl_scan_cmd { + __le16 len; + u8 reserved0; + u8 channel_count; /* # channels in channel list */ + __le16 quiet_time; /* dwell only this # millisecs on quiet channel + * (only for active scan) */ + __le16 quiet_plcp_th; /* quiet chnl is < this # pkts (typ. 1) */ + __le16 good_CRC_th; /* passive -> active promotion threshold */ + __le16 rx_chain; /* RXON_RX_CHAIN_* */ + __le32 max_out_time; /* max usec to be away from associated (service) + * channel */ + __le32 suspend_time; /* pause scan this long (in "extended beacon + * format") when returning to service chnl: + * 3945; 31:24 # beacons, 19:0 additional usec, + * 4965; 31:22 # beacons, 21:0 additional usec. + */ + __le32 flags; /* RXON_FLG_* */ + __le32 filter_flags; /* RXON_FILTER_* */ + + /* For active scans (set to all-0s for passive scans). + * Does not include payload. Must specify Tx rate; no rate scaling. */ + struct iwl_tx_cmd tx_cmd; + + /* For directed active scans (set to all-0s otherwise) */ + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + + /* + * Probe request frame, followed by channel list. + * + * Size of probe request frame is specified by byte count in tx_cmd. + * Channel list follows immediately after probe request frame. + * Number of channels in list is specified by channel_count. + * Each channel in list is of type: + * + * struct iwl_scan_channel channels[0]; + * + * NOTE: Only one band of channels can be scanned per pass. You + * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait + * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) + * before requesting another scan. + */ + u8 data[0]; +} __packed; + +/* Can abort will notify by complete notification with abort status. 
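+ *
+ * For illustration only (added here, not part of the original comment),
+ * a driver builds the variable-length scan command above by appending
+ * the probe request frame and then the channel list to data[]; a minimal
+ * sizing sketch, where probe_len and n_channels are hypothetical local
+ * variables, might be:
+ *
+ *	struct iwl_scan_cmd *scan;
+ *	size_t size = sizeof(*scan) + probe_len +
+ *		      n_channels * sizeof(struct iwl_scan_channel);
+ *
+ *	scan = kzalloc(size, GFP_KERNEL);
+ *	if (!scan)
+ *		return -ENOMEM;
+ *	scan->len = cpu_to_le16(size);
+ *	scan->channel_count = n_channels;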
*/ +#define CAN_ABORT_STATUS cpu_to_le32(0x1) +/* complete notification statuses */ +#define ABORT_STATUS 0x2 + +/* + * REPLY_SCAN_CMD = 0x80 (response) + */ +struct iwl_scanreq_notification { + __le32 status; /* 1: okay, 2: cannot fulfill request */ +} __packed; + +/* + * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command) + */ +struct iwl_scanstart_notification { + __le32 tsf_low; + __le32 tsf_high; + __le32 beacon_timer; + u8 channel; + u8 band; + u8 reserved[2]; + __le32 status; +} __packed; + +#define SCAN_OWNER_STATUS 0x1; +#define MEASURE_OWNER_STATUS 0x2; + +#define IWL_PROBE_STATUS_OK 0 +#define IWL_PROBE_STATUS_TX_FAILED BIT(0) +/* error statuses combined with TX_FAILED */ +#define IWL_PROBE_STATUS_FAIL_TTL BIT(1) +#define IWL_PROBE_STATUS_FAIL_BT BIT(2) + +#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */ +/* + * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command) + */ +struct iwl_scanresults_notification { + u8 channel; + u8 band; + u8 probe_status; + u8 num_probe_not_sent; /* not enough time to send */ + __le32 tsf_low; + __le32 tsf_high; + __le32 statistics[NUMBER_OF_STATISTICS]; +} __packed; + +/* + * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command) + */ +struct iwl_scancomplete_notification { + u8 scanned_channels; + u8 status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; +} __packed; + + +/****************************************************************************** + * (9) + * IBSS/AP Commands and Notifications: + * + *****************************************************************************/ + +enum iwl_ibss_manager { + IWL_NOT_IBSS_MANAGER = 0, + IWL_IBSS_MANAGER = 1, +}; + +/* + * BEACON_NOTIFICATION = 0x90 (notification only, not a command) + */ + +struct iwl3945_beacon_notif { + struct iwl3945_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __packed; + +struct iwl4965_beacon_notif { + struct iwl4965_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __packed; + +/* + * REPLY_TX_BEACON = 0x91 (command, has simple generic response) + */ + +struct iwl3945_tx_beacon_cmd { + struct iwl3945_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __packed; + +struct iwl_tx_beacon_cmd { + struct iwl_tx_cmd tx; + __le16 tim_idx; + u8 tim_size; + u8 reserved1; + struct ieee80211_hdr frame[0]; /* beacon frame */ +} __packed; + +/****************************************************************************** + * (10) + * Statistics Commands and Notifications: + * + *****************************************************************************/ + +#define IWL_TEMP_CONVERT 260 + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/* Used for passing to driver number of successes and failures per rate */ +struct rate_histogram { + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } success; + union { + __le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS]; + __le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS]; + __le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS]; + } failed; +} __packed; + +/* statistics command response */ + +struct iwl39_statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 
fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; +} __packed; + +struct iwl39_statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ +} __packed; + +struct iwl39_statistics_rx { + struct iwl39_statistics_rx_phy ofdm; + struct iwl39_statistics_rx_phy cck; + struct iwl39_statistics_rx_non_phy general; +} __packed; + +struct iwl39_statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; +} __packed; + +struct statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + __le32 reserved[3]; +} __packed; + +struct iwl39_statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; +} __packed; + +struct iwl39_statistics_general { + __le32 temperature; + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct iwl39_statistics_div div; +} __packed; + +struct statistics_rx_phy { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_limit_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved3; +} __packed; + +struct statistics_rx_ht_phy { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __packed; + +#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1) + +struct statistics_rx_non_phy { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. 
*/ + __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; +} __packed; + +struct statistics_rx { + struct statistics_rx_phy ofdm; + struct statistics_rx_phy cck; + struct statistics_rx_non_phy general; + struct statistics_rx_ht_phy ofdm_ht; +} __packed; + +/** + * struct statistics_tx_power - current tx power + * + * @ant_a: current tx power on chain a in 1/2 dB step + * @ant_b: current tx power on chain b in 1/2 dB step + * @ant_c: current tx power on chain c in 1/2 dB step + */ +struct statistics_tx_power { + u8 ant_a; + u8 ant_b; + u8 ant_c; + u8 reserved; +} __packed; + +struct statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; +} __packed; + +struct statistics_tx { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; + struct statistics_tx_non_phy_agg agg; + + __le32 reserved1; +} __packed; + + +struct statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 reserved1; + __le32 reserved2; +} __packed; + +struct statistics_general_common { + __le32 temperature; /* radio temperature */ + struct statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct statistics_div div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; +} __packed; + +struct statistics_general { + struct statistics_general_common common; + __le32 reserved2; + __le32 reserved3; +} __packed; + +#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0) +#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1) +#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2) + +/* + * REPLY_STATISTICS_CMD = 0x9c, + * all devices identical. + * + * This command triggers an immediate response containing uCode statistics. + * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below. + * + * If the CLEAR_STATS configuration flag is set, uCode will clear its + * internal copy of the statistics (counters) after issuing the response. + * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below). + * + * If the DISABLE_NOTIF configuration flag is set, uCode will not issue + * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag + * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself. 
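+ *
+ * For illustration only (not part of the original comment), requesting an
+ * immediate statistics dump and clearing the uCode counters afterwards
+ * could look like this, using the iwl_legacy_send_cmd_pdu() helper used
+ * elsewhere in this patch and the iwl_statistics_cmd struct defined below:
+ *
+ *	struct iwl_statistics_cmd cmd = {
+ *		.configuration_flags = IWL_STATS_CONF_CLEAR_STATS,
+ *	};
+ *
+ *	iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+ *				sizeof(cmd), &cmd);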
+ */
+#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
+#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
+struct iwl_statistics_cmd {
+ __le32 configuration_flags; /* IWL_STATS_CONF_* */
+} __packed;
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
+#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
+
+struct iwl3945_notif_statistics {
+ __le32 flag;
+ struct iwl39_statistics_rx rx;
+ struct iwl39_statistics_tx tx;
+ struct iwl39_statistics_general general;
+} __packed;
+
+struct iwl_notif_statistics {
+ __le32 flag;
+ struct statistics_rx rx;
+ struct statistics_tx tx;
+ struct statistics_general general;
+} __packed;
+
+/*
+ * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
+ * missed beacons, regardless of how many were missed. The notification
+ * reports the total number of missed beacons, the number of consecutive
+ * missed beacons, the number of beacons received, and the number of beacons
+ * expected to be received.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio
+ * in order to bring the radio/PHY back to a working state; this has no
+ * relation to when the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when to
+ * perform sensitivity calibration based on the number of consecutive missed
+ * beacons, in order to improve overall performance, especially in noisy
+ * environments.
+ *
+ */
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
+
+struct iwl_missed_beacon_notif {
+ __le32 consecutive_missed_beacons;
+ __le32 total_missed_becons;
+ __le32 num_expected_beacons;
+ __le32 num_recvd_beacons;
+} __packed;
+
+
+/******************************************************************************
+ * (11)
+ * Rx Calibration Commands:
+ *
+ * With the uCode used for open source drivers, most Tx calibration (except
+ * for Tx Power) and most Rx calibration is done by uCode during the
+ * "initialize" phase of uCode boot. 
Driver must calibrate only: + * + * 1) Tx power (depends on temperature), described elsewhere + * 2) Receiver gain balance (optimize MIMO, and detect disconnected antennas) + * 3) Receiver sensitivity (to optimize signal detection) + * + *****************************************************************************/ + +/** + * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) + * + * This command sets up the Rx signal detector for a sensitivity level that + * is high enough to lock onto all signals within the associated network, + * but low enough to ignore signals that are below a certain threshold, so as + * not to have too many "false alarms". False alarms are signals that the + * Rx DSP tries to lock onto, but then discards after determining that they + * are noise. + * + * The optimum number of false alarms is between 5 and 50 per 200 TUs + * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e. + * time listening, not transmitting). Driver must adjust sensitivity so that + * the ratio of actual false alarms to actual Rx time falls within this range. + * + * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each + * received beacon. These provide information to the driver to analyze the + * sensitivity. Don't analyze statistics that come in from scanning, or any + * other non-associated-network source. Pertinent statistics include: + * + * From "general" statistics (struct statistics_rx_non_phy): + * + * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level) + * Measure of energy of desired signal. Used for establishing a level + * below which the device does not detect signals. + * + * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB) + * Measure of background noise in silent period after beacon. + * + * channel_load + * uSecs of actual Rx time during beacon period (varies according to + * how much time was spent transmitting). + * + * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately: + * + * false_alarm_cnt + * Signal locks abandoned early (before phy-level header). + * + * plcp_err + * Signal locks abandoned late (during phy-level header). + * + * NOTE: Both false_alarm_cnt and plcp_err increment monotonically from + * beacon to beacon, i.e. each value is an accumulation of all errors + * before and including the latest beacon. Values will wrap around to 0 + * after counting up to 2^32 - 1. Driver must differentiate vs. + * previous beacon's values to determine # false alarms in the current + * beacon period. + * + * Total number of false alarms = false_alarms + plcp_errs + * + * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd + * (notice that the start points for OFDM are at or close to settings for + * maximum sensitivity): + * + * START / MIN / MAX + * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120 + * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140 + * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270 + * + * If actual rate of OFDM false alarms (+ plcp_errors) is too high + * (greater than 50 for each 204.8 msecs listening), reduce sensitivity + * by *adding* 1 to all 4 of the table entries above, up to the max for + * each entry. Conversely, if false alarm rate is too low (less than 5 + * for each 204.8 msecs listening), *subtract* 1 from each entry to + * increase sensitivity. + * + * For CCK sensitivity, keep track of the following: + * + * 1). 
20-beacon history of maximum background noise, indicated by + * (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the + * 3 receivers. For any given beacon, the "silence reference" is + * the maximum of last 60 samples (20 beacons * 3 receivers). + * + * 2). 10-beacon history of strongest signal level, as indicated + * by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers, + * i.e. the strength of the signal through the best receiver at the + * moment. These measurements are "upside down", with lower values + * for stronger signals, so max energy will be *minimum* value. + * + * Then for any given beacon, the driver must determine the *weakest* + * of the strongest signals; this is the minimum level that needs to be + * successfully detected, when using the best receiver at the moment. + * "Max cck energy" is the maximum (higher value means lower energy!) + * of the last 10 minima. Once this is determined, driver must add + * a little margin by adding "6" to it. + * + * 3). Number of consecutive beacon periods with too few false alarms. + * Reset this to 0 at the first beacon period that falls within the + * "good" range (5 to 50 false alarms per 204.8 milliseconds rx). + * + * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd + * (notice that the start points for CCK are at maximum sensitivity): + * + * START / MIN / MAX + * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200 + * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400 + * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100 + * + * If actual rate of CCK false alarms (+ plcp_errors) is too high + * (greater than 50 for each 204.8 msecs listening), method for reducing + * sensitivity is: + * + * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * up to max 400. + * + * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160, + * sensitivity has been reduced a significant amount; bring it up to + * a moderate 161. Otherwise, *add* 3, up to max 200. + * + * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160, + * sensitivity has been reduced only a moderate or small amount; + * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX, + * down to min 0. Otherwise (if gain has been significantly reduced), + * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value. + * + * b) Save a snapshot of the "silence reference". + * + * If actual rate of CCK false alarms (+ plcp_errors) is too low + * (less than 5 for each 204.8 msecs listening), method for increasing + * sensitivity is used only if: + * + * 1a) Previous beacon did not have too many false alarms + * 1b) AND difference between previous "silence reference" and current + * "silence reference" (prev - current) is 2 or more, + * OR 2) 100 or more consecutive beacon periods have had rate of + * less than 5 false alarms per 204.8 milliseconds rx time. + * + * Method for increasing sensitivity: + * + * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX, + * down to min 125. + * + * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX, + * down to min 200. + * + * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100. + * + * If actual rate of CCK false alarms (+ plcp_errors) is within good range + * (between 5 and 50 for each 204.8 msecs listening): + * + * 1) Save a snapshot of the silence reference. 
+ * + * 2) If previous beacon had too many CCK false alarms (+ plcp_errors), + * give some extra margin to energy threshold by *subtracting* 8 + * from value in HD_MIN_ENERGY_CCK_DET_INDEX. + * + * For all cases (too few, too many, good range), make sure that the CCK + * detection threshold (energy) is below the energy level for robust + * detection over the past 10 beacon periods, the "Max cck energy". + * Lower values mean higher energy; this means making sure that the value + * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". + * + */ + +/* + * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd) + */ +#define HD_TABLE_SIZE (11) /* number of entries */ +#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */ +#define HD_MIN_ENERGY_OFDM_DET_INDEX (1) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2) +#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5) +#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6) +#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7) +#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8) +#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9) +#define HD_OFDM_ENERGY_TH_IN_INDEX (10) + +/* Control field in struct iwl_sensitivity_cmd */ +#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0) +#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1) + +/** + * struct iwl_sensitivity_cmd + * @control: (1) updates working table, (0) updates default table + * @table: energy threshold values, use HD_* as index into table + * + * Always use "1" in "control" to update uCode's working table and DSP. + */ +struct iwl_sensitivity_cmd { + __le16 control; /* always use "1" */ + __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */ +} __packed; + + +/** + * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response) + * + * This command sets the relative gains of 4965 device's 3 radio receiver chains. + * + * After the first association, driver should accumulate signal and noise + * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20 + * beacons from the associated network (don't collect statistics that come + * in from scanning, or any other non-network source). + * + * DISCONNECTED ANTENNA: + * + * Driver should determine which antennas are actually connected, by comparing + * average beacon signal levels for the 3 Rx chains. Accumulate (add) the + * following values over 20 beacons, one accumulator for each of the chains + * a/b/c, from struct statistics_rx_non_phy: + * + * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB) + * + * Find the strongest signal from among a/b/c. Compare the other two to the + * strongest. If any signal is more than 15 dB (times 20, unless you + * divide the accumulated values by 20) below the strongest, the driver + * considers that antenna to be disconnected, and should not try to use that + * antenna/chain for Rx or Tx. If both A and B seem to be disconnected, + * driver should declare the stronger one as connected, and attempt to use it + * (A and B are the only 2 Tx chains!). + * + * + * RX BALANCE: + * + * Driver should balance the 3 receivers (but just the ones that are connected + * to antennas, see above) for gain, by comparing the average signal levels + * detected during the silence after each beacon (background noise). 
+ * Accumulate (add) the following values over 20 beacons, one accumulator for + * each of the chains a/b/c, from struct statistics_rx_non_phy: + * + * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB) + * + * Find the weakest background noise level from among a/b/c. This Rx chain + * will be the reference, with 0 gain adjustment. Attenuate other channels by + * finding noise difference: + * + * (accum_noise[i] - accum_noise[reference]) / 30 + * + * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB. + * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the + * driver should limit the difference results to a range of 0-3 (0-4.5 dB), + * and set bit 2 to indicate "reduce gain". The value for the reference + * (weakest) chain should be "0". + * + * diff_gain_[abc] bit fields: + * 2: (1) reduce gain, (0) increase gain + * 1-0: amount of gain, units of 1.5 dB + */ + +/* Phy calibration command for series */ +/* The default calibrate table size if not specified by firmware */ +#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18 +enum { + IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7, + IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19, +}; + +#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253) + +struct iwl_calib_hdr { + u8 op_code; + u8 first_group; + u8 groups_num; + u8 data_valid; +} __packed; + +/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */ +struct iwl_calib_diff_gain_cmd { + struct iwl_calib_hdr hdr; + s8 diff_gain_a; /* see above */ + s8 diff_gain_b; + s8 diff_gain_c; + u8 reserved1; +} __packed; + +/****************************************************************************** + * (12) + * Miscellaneous Commands: + * + *****************************************************************************/ + +/* + * LEDs Command & Response + * REPLY_LEDS_CMD = 0x48 (command, has simple generic response) + * + * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field), + * this command turns it on or off, or sets up a periodic blinking cycle. + */ +struct iwl_led_cmd { + __le32 interval; /* "interval" in uSec */ + u8 id; /* 1: Activity, 2: Link, 3: Tech */ + u8 off; /* # intervals off while blinking; + * "0", with >0 "on" value, turns LED on */ + u8 on; /* # intervals on while blinking; + * "0", regardless of "off", turns LED off */ + u8 reserved; +} __packed; + + +/****************************************************************************** + * (13) + * Union of all expected notifications/responses: + * + *****************************************************************************/ + +struct iwl_rx_packet { + /* + * The first 4 bytes of the RX frame header contain both the RX frame + * size and some flags. 
+ * Bit fields: + * 31: flag flush RB request + * 30: flag ignore TC (terminal counter) request + * 29: flag fast IRQ request + * 28-14: Reserved + * 13-00: RX frame size + */ + __le32 len_n_flags; + struct iwl_cmd_header hdr; + union { + struct iwl3945_rx_frame rx_frame; + struct iwl3945_tx_resp tx_resp; + struct iwl3945_beacon_notif beacon_status; + + struct iwl_alive_resp alive_frame; + struct iwl_spectrum_notification spectrum_notif; + struct iwl_csa_notification csa_notif; + struct iwl_error_resp err_resp; + struct iwl_card_state_notif card_state_notif; + struct iwl_add_sta_resp add_sta; + struct iwl_rem_sta_resp rem_sta; + struct iwl_sleep_notification sleep_notif; + struct iwl_spectrum_resp spectrum; + struct iwl_notif_statistics stats; + struct iwl_compressed_ba_resp compressed_ba; + struct iwl_missed_beacon_notif missed_beacon; + __le32 status; + u8 raw[0]; + } u; +} __packed; + +#endif /* __iwl_legacy_commands_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c new file mode 100644 index 000000000000..d418b647be80 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-core.c @@ -0,0 +1,2674 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/etherdevice.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <net/mac80211.h> + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-power.h" +#include "iwl-sta.h" +#include "iwl-helpers.h" + + +MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965"); +MODULE_VERSION(IWLWIFI_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); + +/* + * set bt_coex_active to true, uCode will do kill/defer + * every time the priority line is asserted (BT is sending signals on the + * priority line in the PCIx). + * set bt_coex_active to false, uCode will ignore the BT activity and + * perform the normal operation + * + * User might experience transmit issue on some platform due to WiFi/BT + * co-exist problem. 
The possible behaviors are: + * Able to scan and finding all the available AP + * Not able to associate with any AP + * On those platforms, WiFi communication can be restored by set + * "bt_coex_active" module parameter to "false" + * + * default: bt_coex_active = true (BT_COEX_ENABLE) + */ +static bool bt_coex_active = true; +module_param(bt_coex_active, bool, S_IRUGO); +MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); + +u32 iwlegacy_debug_level; +EXPORT_SYMBOL(iwlegacy_debug_level); + +const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; +EXPORT_SYMBOL(iwlegacy_bcast_addr); + + +/* This function both allocates and initializes hw and priv. */ +struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg) +{ + struct iwl_priv *priv; + /* mac80211 allocates memory for this device instance, including + * space for this driver's private structure */ + struct ieee80211_hw *hw; + + hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), + cfg->ops->ieee80211_ops); + if (hw == NULL) { + pr_err("%s: Can not allocate network device\n", + cfg->name); + goto out; + } + + priv = hw->priv; + priv->hw = hw; + +out: + return hw; +} +EXPORT_SYMBOL(iwl_legacy_alloc_all); + +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ +static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv, + struct ieee80211_sta_ht_cap *ht_info, + enum ieee80211_band band) +{ + u16 max_bit_rate = 0; + u8 rx_chains_num = priv->hw_params.rx_chains_num; + u8 tx_chains_num = priv->hw_params.tx_chains_num; + + ht_info->cap = 0; + memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); + + ht_info->ht_supported = true; + + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + max_bit_rate = MAX_BIT_RATE_20_MHZ; + if (priv->hw_params.ht40_channel & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + ht_info->mcs.rx_mask[4] = 0x01; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + if (priv->cfg->mod_params->amsdu_size_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; + ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; + + ht_info->mcs.rx_mask[0] = 0xFF; + if (rx_chains_num >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains_num >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + + /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains_num; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains_num != rx_chains_num) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_chains_num - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} + +/** + * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom + */ +int iwl_legacy_init_geos(struct iwl_priv *priv) +{ + struct iwl_channel_info *ch; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *channels; + struct ieee80211_channel *geo_ch; + struct ieee80211_rate *rates; + int i = 0; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || + priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { + IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n"); + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + return 0; + } + + channels = kzalloc(sizeof(struct ieee80211_channel) * + priv->channel_count, GFP_KERNEL); + if (!channels) + return -ENOMEM; + + rates = kzalloc((sizeof(struct 
ieee80211_rate) * IWL_RATE_COUNT_LEGACY), + GFP_KERNEL); + if (!rates) { + kfree(channels); + return -ENOMEM; + } + + /* 5.2GHz channels start after the 2.4GHz channels */ + sband = &priv->bands[IEEE80211_BAND_5GHZ]; + sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)]; + /* just OFDM */ + sband->bitrates = &rates[IWL_FIRST_OFDM_RATE]; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; + + if (priv->cfg->sku & IWL_SKU_N) + iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_5GHZ); + + sband = &priv->bands[IEEE80211_BAND_2GHZ]; + sband->channels = channels; + /* OFDM & CCK */ + sband->bitrates = rates; + sband->n_bitrates = IWL_RATE_COUNT_LEGACY; + + if (priv->cfg->sku & IWL_SKU_N) + iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap, + IEEE80211_BAND_2GHZ); + + priv->ieee_channels = channels; + priv->ieee_rates = rates; + + for (i = 0; i < priv->channel_count; i++) { + ch = &priv->channel_info[i]; + + if (!iwl_legacy_is_channel_valid(ch)) + continue; + + if (iwl_legacy_is_channel_a_band(ch)) + sband = &priv->bands[IEEE80211_BAND_5GHZ]; + else + sband = &priv->bands[IEEE80211_BAND_2GHZ]; + + geo_ch = &sband->channels[sband->n_channels++]; + + geo_ch->center_freq = + ieee80211_channel_to_frequency(ch->channel, ch->band); + geo_ch->max_power = ch->max_power_avg; + geo_ch->max_antenna_gain = 0xff; + geo_ch->hw_value = ch->channel; + + if (iwl_legacy_is_channel_valid(ch)) { + if (!(ch->flags & EEPROM_CHANNEL_IBSS)) + geo_ch->flags |= IEEE80211_CHAN_NO_IBSS; + + if (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) + geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN; + + if (ch->flags & EEPROM_CHANNEL_RADAR) + geo_ch->flags |= IEEE80211_CHAN_RADAR; + + geo_ch->flags |= ch->ht40_extension_channel; + + if (ch->max_power_avg > priv->tx_power_device_lmt) + priv->tx_power_device_lmt = ch->max_power_avg; + } else { + geo_ch->flags |= IEEE80211_CHAN_DISABLED; + } + + IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", + ch->channel, geo_ch->center_freq, + iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4", + geo_ch->flags & IEEE80211_CHAN_DISABLED ? + "restricted" : "valid", + geo_ch->flags); + } + + if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) && + priv->cfg->sku & IWL_SKU_A) { + IWL_INFO(priv, "Incorrectly detected BG card as ABG. 
" + "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n", + priv->pci_dev->device, + priv->pci_dev->subsystem_device); + priv->cfg->sku &= ~IWL_SKU_A; + } + + IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n", + priv->bands[IEEE80211_BAND_2GHZ].n_channels, + priv->bands[IEEE80211_BAND_5GHZ].n_channels); + + set_bit(STATUS_GEO_CONFIGURED, &priv->status); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_init_geos); + +/* + * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos + */ +void iwl_legacy_free_geos(struct iwl_priv *priv) +{ + kfree(priv->ieee_channels); + kfree(priv->ieee_rates); + clear_bit(STATUS_GEO_CONFIGURED, &priv->status); +} +EXPORT_SYMBOL(iwl_legacy_free_geos); + +static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv, + enum ieee80211_band band, + u16 channel, u8 extension_chan_offset) +{ + const struct iwl_channel_info *ch_info; + + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (!iwl_legacy_is_channel_valid(ch_info)) + return false; + + if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40PLUS); + else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW) + return !(ch_info->ht40_extension_channel & + IEEE80211_CHAN_NO_HT40MINUS); + + return false; +} + +bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap) +{ + if (!ctx->ht.enabled || !ctx->ht.is_40mhz) + return false; + + /* + * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 + * the bit will not set if it is pure 40MHz case + */ + if (ht_cap && !ht_cap->ht_supported) + return false; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + if (priv->disable_ht40) + return false; +#endif + + return iwl_legacy_is_channel_extension(priv, priv->band, + le16_to_cpu(ctx->staging.channel), + ctx->ht.extension_chan_offset); +} +EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed); + +static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) +{ + u16 new_val; + u16 beacon_factor; + + /* + * If mac80211 hasn't given us a beacon interval, program + * the default into the device. + */ + if (!beacon_val) + return DEFAULT_BEACON_INTERVAL; + + /* + * If the beacon interval we obtained from the peer + * is too large, we'll have to wake up more often + * (and in IBSS case, we'll beacon too much) + * + * For example, if max_beacon_val is 4096, and the + * requested beacon interval is 7000, we'll have to + * use 3500 to be able to wake up on the beacons. + * + * This could badly influence beacon detection stats. + */ + + beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val; + new_val = beacon_val / beacon_factor; + + if (!new_val) + new_val = max_beacon_val; + + return new_val; +} + +int +iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + u64 tsf; + s32 interval_tm, rem; + struct ieee80211_conf *conf = NULL; + u16 beacon_int; + struct ieee80211_vif *vif = ctx->vif; + + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); + + lockdep_assert_held(&priv->mutex); + + memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); + + ctx->timing.timestamp = cpu_to_le64(priv->timestamp); + ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); + + beacon_int = vif ? 
vif->bss_conf.beacon_int : 0; + + /* + * TODO: For IBSS we need to get atim_window from mac80211, + * for now just always use 0 + */ + ctx->timing.atim_window = 0; + + beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int, + priv->hw_params.max_beacon_itrvl * TIME_UNIT); + ctx->timing.beacon_interval = cpu_to_le16(beacon_int); + + tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */ + interval_tm = beacon_int * TIME_UNIT; + rem = do_div(tsf, interval_tm); + ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); + + ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1; + + IWL_DEBUG_ASSOC(priv, + "beacon interval %d beacon timer %d beacon tim %d\n", + le16_to_cpu(ctx->timing.beacon_interval), + le32_to_cpu(ctx->timing.beacon_init_val), + le16_to_cpu(ctx->timing.atim_window)); + + return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd, + sizeof(ctx->timing), &ctx->timing); +} +EXPORT_SYMBOL(iwl_legacy_send_rxon_timing); + +void +iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + int hw_decrypt) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + if (hw_decrypt) + rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; + else + rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; + +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto); + +/* validate RXON structure is valid */ +int +iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + bool error = false; + + if (rxon->flags & RXON_FLG_BAND_24G_MSK) { + if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) { + IWL_WARN(priv, "check 2.4G: wrong narrow\n"); + error = true; + } + if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) { + IWL_WARN(priv, "check 2.4G: wrong radar\n"); + error = true; + } + } else { + if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "check 5.2G: not short slot!\n"); + error = true; + } + if (rxon->flags & RXON_FLG_CCK_MSK) { + IWL_WARN(priv, "check 5.2G: CCK!\n"); + error = true; + } + } + if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) { + IWL_WARN(priv, "mac/bssid mcast!\n"); + error = true; + } + + /* make sure basic rates 6Mbps and 1Mbps are supported */ + if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 && + (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) { + IWL_WARN(priv, "neither 1 nor 6 are basic\n"); + error = true; + } + + if (le16_to_cpu(rxon->assoc_id) > 2007) { + IWL_WARN(priv, "aid > 2007\n"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) { + IWL_WARN(priv, "CCK and short slot\n"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) + == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) { + IWL_WARN(priv, "CCK and auto detect"); + error = true; + } + + if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | + RXON_FLG_TGG_PROTECT_MSK)) == + RXON_FLG_TGG_PROTECT_MSK) { + IWL_WARN(priv, "TGg but no auto-detect\n"); + error = true; + } + + if (error) + IWL_WARN(priv, "Tuning to channel %d\n", + le16_to_cpu(rxon->channel)); + + if (error) { + IWL_ERR(priv, "Invalid RXON\n"); + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd); + +/** + * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed + * @priv: staging_rxon is compared to active_rxon + * + * If the RXON structure is changing enough to require a new tune, + * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to 
indicate that + * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. + */ +int iwl_legacy_full_rxon_required(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_legacy_rxon_cmd *staging = &ctx->staging; + const struct iwl_legacy_rxon_cmd *active = &ctx->active; + +#define CHK(cond) \ + if ((cond)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \ + return 1; \ + } + +#define CHK_NEQ(c1, c2) \ + if ((c1) != (c2)) { \ + IWL_DEBUG_INFO(priv, "need full RXON - " \ + #c1 " != " #c2 " - %d != %d\n", \ + (c1), (c2)); \ + return 1; \ + } + + /* These items are only settable from the full RXON command */ + CHK(!iwl_legacy_is_associated_ctx(ctx)); + CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr)); + CHK(compare_ether_addr(staging->node_addr, active->node_addr)); + CHK(compare_ether_addr(staging->wlap_bssid_addr, + active->wlap_bssid_addr)); + CHK_NEQ(staging->dev_type, active->dev_type); + CHK_NEQ(staging->channel, active->channel); + CHK_NEQ(staging->air_propagation, active->air_propagation); + CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates, + active->ofdm_ht_single_stream_basic_rates); + CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates, + active->ofdm_ht_dual_stream_basic_rates); + CHK_NEQ(staging->assoc_id, active->assoc_id); + + /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can + * be updated with the RXON_ASSOC command -- however only some + * flag transitions are allowed using RXON_ASSOC */ + + /* Check if we are not switching bands */ + CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK, + active->flags & RXON_FLG_BAND_24G_MSK); + + /* Check if we are switching association toggle */ + CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK, + active->filter_flags & RXON_FILTER_ASSOC_MSK); + +#undef CHK +#undef CHK_NEQ + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_full_rxon_required); + +u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + /* + * Assign the lowest rate -- should really get this from + * the beacon skb from mac80211. 
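As an aside, the CHK()/CHK_NEQ() macros above are a generic pattern for diffing a staging configuration against the active one while logging exactly which field forced the full command. A minimal standalone sketch of the same idea, with invented field names (not the RXON layout), could look like this:

	#include <stdio.h>
	#include <string.h>

	struct cfg {
		unsigned short channel;
		unsigned char dev_type;
		unsigned char bssid[6];
	};

	/* Return 1 when any field differs enough to need a "full" update. */
	static int full_update_required(const struct cfg *staging,
					const struct cfg *active)
	{
	#define CHK(cond)						\
		if (cond) {						\
			printf("need full update - " #cond "\n");	\
			return 1;					\
		}
	#define CHK_NEQ(c1, c2)						\
		if ((c1) != (c2)) {					\
			printf("need full update - " #c1 " != " #c2	\
			       " - %d != %d\n", (int)(c1), (int)(c2));	\
			return 1;					\
		}

		CHK(memcmp(staging->bssid, active->bssid, sizeof(staging->bssid)));
		CHK_NEQ(staging->dev_type, active->dev_type);
		CHK_NEQ(staging->channel, active->channel);

	#undef CHK
	#undef CHK_NEQ
		return 0;	/* the cheap "assoc only" update is enough */
	}

	int main(void)
	{
		struct cfg a = { .channel = 1, .dev_type = 3, .bssid = { 0 } };
		struct cfg b = a;

		b.channel = 6;
		return full_update_required(&b, &a); /* logs the changed field */
	}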
+ */ + if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) + return IWL_RATE_1M_PLCP; + else + return IWL_RATE_6M_PLCP; +} +EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp); + +static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv, + struct iwl_ht_config *ht_conf, + struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + if (!ctx->ht.enabled) { + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | + RXON_FLG_HT40_PROT_MSK | + RXON_FLG_HT_PROT_MSK); + return; + } + + rxon->flags |= cpu_to_le32(ctx->ht.protection << + RXON_FLG_HT_OPERATING_MODE_POS); + + /* Set up channel bandwidth: + * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ + /* clear the HT channel mode before set the mode */ + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) { + /* pure ht40 */ + if (ctx->ht.protection == + IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { + rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; + /* Note: control channel is opposite of extension channel */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + break; + } + } else { + /* Note: control channel is opposite of extension channel */ + switch (ctx->ht.extension_chan_offset) { + case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: + rxon->flags &= + ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_BELOW: + rxon->flags |= + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; + rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; + break; + case IEEE80211_HT_PARAM_CHA_SEC_NONE: + default: + /* channel location only valid if in Mixed mode */ + IWL_ERR(priv, + "invalid extension channel offset\n"); + break; + } + } + } else { + rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY; + } + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X " + "extension channel offset 0x%x\n", + le32_to_cpu(rxon->flags), ctx->ht.protection, + ctx->ht.extension_chan_offset); +} + +void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) +{ + struct iwl_rxon_context *ctx; + + for_each_context(priv, ctx) + _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx); +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_ht); + +/* Return valid, unused, channel for a passive scan to reset the RF */ +u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv, + enum ieee80211_band band) +{ + const struct iwl_channel_info *ch_info; + int i; + u8 channel = 0; + u8 min, max; + struct iwl_rxon_context *ctx; + + if (band == IEEE80211_BAND_5GHZ) { + min = 14; + max = priv->channel_count; + } else { + min = 0; + max = 14; + } + + for (i = min; i < max; i++) { + bool busy = false; + + for_each_context(priv, ctx) { + busy = priv->channel_info[i].channel == + le16_to_cpu(ctx->staging.channel); + if (busy) + break; + } + + if (busy) + continue; + + channel = priv->channel_info[i].channel; + ch_info = iwl_legacy_get_channel_info(priv, band, channel); + if (iwl_legacy_is_channel_valid(ch_info)) + break; + } + + return channel; +} +EXPORT_SYMBOL(iwl_legacy_get_single_channel_number); + +/** + * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON + * @ch: requested channel as a pointer to struct ieee80211_channel + + 
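A small illustration of the "control channel is opposite of extension channel" rule used above: given the primary (control) channel's centre frequency, the 40 MHz block centres 10 MHz above or below it depending on the secondary-channel offset. This is a generic 802.11 sketch, not driver code:

	#include <stdio.h>

	enum sec_offset { SEC_NONE, SEC_ABOVE, SEC_BELOW };

	/*
	 * In HT40 the control channel sits on one edge of the 40 MHz block
	 * and the extension channel on the other, so the 40 MHz centre is
	 * +10 MHz when the secondary is above and -10 MHz when it is below.
	 */
	static int ht40_center_freq(int primary_freq_mhz, enum sec_offset off)
	{
		switch (off) {
		case SEC_ABOVE:
			return primary_freq_mhz + 10;
		case SEC_BELOW:
			return primary_freq_mhz - 10;
		default:
			return primary_freq_mhz;	/* plain 20 MHz */
		}
	}

	int main(void)
	{
		/* channel 36 (5180 MHz) with the extension above -> 5190 MHz */
		printf("%d\n", ht40_center_freq(5180, SEC_ABOVE));
		return 0;
	}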
* NOTE: Does not commit to the hardware; it sets appropriate bit fields + * in the staging RXON flag structure based on the ch->band + */ +int +iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx) +{ + enum ieee80211_band band = ch->band; + u16 channel = ch->hw_value; + + if ((le16_to_cpu(ctx->staging.channel) == channel) && + (priv->band == band)) + return 0; + + ctx->staging.channel = cpu_to_le16(channel); + if (band == IEEE80211_BAND_5GHZ) + ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; + else + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + + priv->band = band; + + IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_set_rxon_channel); + +void iwl_legacy_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + if (band == IEEE80211_BAND_5GHZ) { + ctx->staging.flags &= + ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK + | RXON_FLG_CCK_MSK); + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + } else { + /* Copied from iwl_post_associate() */ + if (vif && vif->bss_conf.use_short_slot) + ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; + + ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; + ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; + ctx->staging.flags &= ~RXON_FLG_CCK_MSK; + } +} +EXPORT_SYMBOL(iwl_legacy_set_flags_for_band); + +/* + * initialize rxon structure with default values from eeprom + */ +void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + const struct iwl_channel_info *ch_info; + + memset(&ctx->staging, 0, sizeof(ctx->staging)); + + if (!ctx->vif) { + ctx->staging.dev_type = ctx->unused_devtype; + } else + switch (ctx->vif->type) { + + case NL80211_IFTYPE_STATION: + ctx->staging.dev_type = ctx->station_devtype; + ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; + break; + + case NL80211_IFTYPE_ADHOC: + ctx->staging.dev_type = ctx->ibss_devtype; + ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; + ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | + RXON_FILTER_ACCEPT_GRP_MSK; + break; + + default: + IWL_ERR(priv, "Unsupported interface type %d\n", + ctx->vif->type); + break; + } + +#if 0 + /* TODO: Figure out when short_preamble would be set and cache from + * that */ + if (!hw_to_local(priv->hw)->short_preamble) + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; +#endif + + ch_info = iwl_legacy_get_channel_info(priv, priv->band, + le16_to_cpu(ctx->active.channel)); + + if (!ch_info) + ch_info = &priv->channel_info[0]; + + ctx->staging.channel = cpu_to_le16(ch_info->channel); + priv->band = ch_info->band; + + iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif); + + ctx->staging.ofdm_basic_rates = + (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + ctx->staging.cck_basic_rates = + (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + /* clear both MIX and PURE40 mode flag */ + ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | + RXON_FLG_CHANNEL_MODE_PURE_40); + if (ctx->vif) + memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); + + ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; + ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; +} +EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config); + +void iwl_legacy_set_rate(struct iwl_priv *priv) +{ + const struct 
ieee80211_supported_band *hw = NULL; + struct ieee80211_rate *rate; + struct iwl_rxon_context *ctx; + int i; + + hw = iwl_get_hw_mode(priv, priv->band); + if (!hw) { + IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n"); + return; + } + + priv->active_rate = 0; + + for (i = 0; i < hw->n_bitrates; i++) { + rate = &(hw->bitrates[i]); + if (rate->hw_value < IWL_RATE_COUNT_LEGACY) + priv->active_rate |= (1 << rate->hw_value); + } + + IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate); + + for_each_context(priv, ctx) { + ctx->staging.cck_basic_rates = + (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; + + ctx->staging.ofdm_basic_rates = + (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; + } +} +EXPORT_SYMBOL(iwl_legacy_set_rate); + +void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (priv->switch_rxon.switch_in_progress) { + ieee80211_chswitch_done(ctx->vif, is_success); + mutex_lock(&priv->mutex); + priv->switch_rxon.switch_in_progress = false; + mutex_unlock(&priv->mutex); + } +} +EXPORT_SYMBOL(iwl_legacy_chswitch_done); + +void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active; + + if (priv->switch_rxon.switch_in_progress) { + if (!le32_to_cpu(csa->status) && + (csa->channel == priv->switch_rxon.channel)) { + rxon->channel = csa->channel; + ctx->staging.channel = csa->channel; + IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", + le16_to_cpu(csa->channel)); + iwl_legacy_chswitch_done(priv, true); + } else { + IWL_ERR(priv, "CSA notif (fail) : channel %d\n", + le16_to_cpu(csa->channel)); + iwl_legacy_chswitch_done(priv, false); + } + } +} +EXPORT_SYMBOL(iwl_legacy_rx_csa); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_rxon_cmd *rxon = &ctx->staging; + + IWL_DEBUG_RADIO(priv, "RX CONFIG:\n"); + iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); + IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", + le16_to_cpu(rxon->channel)); + IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); + IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n", + le32_to_cpu(rxon->filter_flags)); + IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type); + IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n", + rxon->ofdm_basic_rates); + IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", + rxon->cck_basic_rates); + IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr); + IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); + IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", + le16_to_cpu(rxon->assoc_id)); +} +EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd); +#endif +/** + * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card + */ +void iwl_legacy_irq_handle_error(struct iwl_priv *priv) +{ + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + + /* Cancel currently queued command. 
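The rate loop in iwl_legacy_set_rate() above is just a bitmask build over the band's bitrate table; a self-contained sketch of that step, with a hypothetical rate count and index table, is shown below.

	#include <stdint.h>
	#include <stdio.h>

	#define RATE_COUNT_LEGACY 12	/* illustrative size */

	/* Set one bit per supported legacy rate index. */
	static uint32_t build_active_rate(const uint8_t *hw_values, int n)
	{
		uint32_t active = 0;
		int i;

		for (i = 0; i < n; i++)
			if (hw_values[i] < RATE_COUNT_LEGACY)
				active |= 1u << hw_values[i];
		return active;
	}

	int main(void)
	{
		uint8_t rates[] = { 0, 1, 2, 3, 4, 5 };	/* made-up hw indexes */

		printf("active_rate = 0x%x\n",
		       build_active_rate(rates, sizeof(rates) / sizeof(rates[0])));
		return 0;
	}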
*/ + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + + IWL_ERR(priv, "Loaded firmware version: %s\n", + priv->hw->wiphy->fw_version); + + priv->cfg->ops->lib->dump_nic_error_log(priv); + if (priv->cfg->ops->lib->dump_fh) + priv->cfg->ops->lib->dump_fh(priv, NULL, false); + priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) + iwl_legacy_print_rx_config_cmd(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); +#endif + + wake_up_interruptible(&priv->wait_command_queue); + + /* Keep the restart process from trying to send host + * commands by clearing the INIT status bit */ + clear_bit(STATUS_READY, &priv->status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_DEBUG(priv, IWL_DL_FW_ERRORS, + "Restarting adapter due to uCode error.\n"); + + if (priv->cfg->mod_params->restart_fw) + queue_work(priv->workqueue, &priv->restart); + } +} +EXPORT_SYMBOL(iwl_legacy_irq_handle_error); + +static int iwl_legacy_apm_stop_master(struct iwl_priv *priv) +{ + int ret = 0; + + /* stop device's busmaster DMA activity */ + iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); + + ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, + CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); + if (ret) + IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n"); + + IWL_DEBUG_INFO(priv, "stop master\n"); + + return ret; +} + +void iwl_legacy_apm_stop(struct iwl_priv *priv) +{ + IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n"); + + /* Stop device's DMA activity */ + iwl_legacy_apm_stop_master(priv); + + /* Reset the entire device */ + iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); + + udelay(10); + + /* + * Clear "initialization complete" bit to move adapter from + * D0A* (powered-up Active) --> D0U* (Uninitialized) state. + */ + iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); +} +EXPORT_SYMBOL(iwl_legacy_apm_stop); + + +/* + * Start up NIC's basic functionality after it has been reset + * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop()) + * NOTE: This does not load uCode nor start the embedded processor + */ +int iwl_legacy_apm_init(struct iwl_priv *priv) +{ + int ret = 0; + u16 lctl; + + IWL_DEBUG_INFO(priv, "Init card's basic functions\n"); + + /* + * Use "set_bit" below rather than "write", to preserve any hardware + * bits already set by default after reset. + */ + + /* Disable L0S exit timer (platform NMI Work/Around) */ + iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); + + /* + * Disable L0s without affecting L1; + * don't wait for ICH L0s (ICH bug W/A) + */ + iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS, + CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); + + /* Set FH wait threshold to maximum (HW error during stress W/A) */ + iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG, + CSR_DBG_HPET_MEM_REG_VAL); + + /* + * Enable HAP INTA (interrupt from management bus) to + * wake device's PCI Express link L1a -> L0s + * NOTE: This is no-op for 3945 (non-existant bit) + */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); + + /* + * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition. + * Check if BIOS (or OS) enabled L1-ASPM on this device. + * If so (likely), disable L0S, so device moves directly L0->L1; + * costs negligible amount of power savings. 
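The iwl_poll_bit() calls used in the stop/init paths above follow a common "poll a status register until masked bits match or a timeout expires" pattern. A standalone sketch with a simulated register (the real code reads MMIO and uses microsecond delays):

	#include <stdint.h>
	#include <stdio.h>

	/* Simulated device register; in the driver this is an MMIO read. */
	static volatile uint32_t fake_reg;

	static uint32_t read_reg(void)
	{
		static int polls;

		/* pretend the hardware raises bit 8 after a few polls */
		if (++polls >= 3)
			fake_reg |= 0x100;
		return fake_reg;
	}

	/* Re-read until the masked bits match, or give up after max_polls. */
	static int poll_bits(uint32_t mask, uint32_t value, int max_polls)
	{
		int i;

		for (i = 0; i < max_polls; i++)
			if ((read_reg() & mask) == (value & mask))
				return 0;
		return -1;	/* timed out, cf. "Master Disable Timed Out" */
	}

	int main(void)
	{
		printf("%s\n", poll_bits(0x100, 0x100, 10) ? "timeout" : "ready");
		return 0;
	}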
+ * If not (unlikely), enable L0S, so there is at least some + * power savings, even without L1. + */ + if (priv->cfg->base_params->set_l0s) { + lctl = iwl_legacy_pcie_link_ctl(priv); + if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == + PCI_CFG_LINK_CTRL_VAL_L1_EN) { + /* L1-ASPM enabled; disable(!) L0S */ + iwl_legacy_set_bit(priv, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n"); + } else { + /* L1-ASPM disabled; enable(!) L0S */ + iwl_legacy_clear_bit(priv, CSR_GIO_REG, + CSR_GIO_REG_VAL_L0S_ENABLED); + IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n"); + } + } + + /* Configure analog phase-lock-loop before activating to D0A */ + if (priv->cfg->base_params->pll_cfg_val) + iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG, + priv->cfg->base_params->pll_cfg_val); + + /* + * Set "initialization complete" bit to move adapter from + * D0U* --> D0A* (powered-up active) state. + */ + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + + /* + * Wait for clock stabilization; once stabilized, access to + * device-internal resources is supported, e.g. iwl_legacy_write_prph() + * and accesses to uCode SRAM. + */ + ret = iwl_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); + if (ret < 0) { + IWL_DEBUG_INFO(priv, "Failed to init the card\n"); + goto out; + } + + /* + * Enable DMA and BSM (if used) clocks, wait for them to stabilize. + * BSM (Boostrap State Machine) is only in 3945 and 4965. + * + * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits + * do not disable clocks. This preserves any hardware bits already + * set by default in "CLK_CTRL_REG" after reset. + */ + if (priv->cfg->base_params->use_bsm) + iwl_legacy_write_prph(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT); + else + iwl_legacy_write_prph(priv, APMG_CLK_EN_REG, + APMG_CLK_VAL_DMA_CLK_RQT); + udelay(20); + + /* Disable L1-Active */ + iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG, + APMG_PCIDEV_STT_VAL_L1_ACT_DIS); + +out: + return ret; +} +EXPORT_SYMBOL(iwl_legacy_apm_init); + + +int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) +{ + int ret; + s8 prev_tx_power; + bool defer; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + lockdep_assert_held(&priv->mutex); + + if (priv->tx_power_user_lmt == tx_power && !force) + return 0; + + if (!priv->cfg->ops->lib->send_tx_power) + return -EOPNOTSUPP; + + if (tx_power < IWL4965_TX_POWER_TARGET_POWER_MIN) { + IWL_WARN(priv, + "Requested user TXPOWER %d below lower limit %d.\n", + tx_power, + IWL4965_TX_POWER_TARGET_POWER_MIN); + return -EINVAL; + } + + if (tx_power > priv->tx_power_device_lmt) { + IWL_WARN(priv, + "Requested user TXPOWER %d above upper limit %d.\n", + tx_power, priv->tx_power_device_lmt); + return -EINVAL; + } + + if (!iwl_legacy_is_ready_rf(priv)) + return -EIO; + + /* scan complete and commit_rxon use tx_power_next value, + * it always need to be updated for newest request */ + priv->tx_power_next = tx_power; + + /* do not set tx power when scanning or channel changing */ + defer = test_bit(STATUS_SCANNING, &priv->status) || + memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); + if (defer && !force) { + IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); + return 0; + } + + prev_tx_power = priv->tx_power_user_lmt; + priv->tx_power_user_lmt = tx_power; + + ret = priv->cfg->ops->lib->send_tx_power(priv); + + /* if fail to set tx_power, restore the 
orig. tx power */ + if (ret) { + priv->tx_power_user_lmt = prev_tx_power; + priv->tx_power_next = prev_tx_power; + } + return ret; +} +EXPORT_SYMBOL(iwl_legacy_set_tx_power); + +void iwl_legacy_send_bt_config(struct iwl_priv *priv) +{ + struct iwl_bt_cmd bt_cmd = { + .lead_time = BT_LEAD_TIME_DEF, + .max_kill = BT_MAX_KILL_DEF, + .kill_ack_mask = 0, + .kill_cts_mask = 0, + }; + + if (!bt_coex_active) + bt_cmd.flags = BT_COEX_DISABLE; + else + bt_cmd.flags = BT_COEX_ENABLE; + + IWL_DEBUG_INFO(priv, "BT coex %s\n", + (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); + + if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(struct iwl_bt_cmd), &bt_cmd)) + IWL_ERR(priv, "failed to send BT Coex Config\n"); +} +EXPORT_SYMBOL(iwl_legacy_send_bt_config); + +int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) +{ + struct iwl_statistics_cmd statistics_cmd = { + .configuration_flags = + clear ? IWL_STATS_CONF_CLEAR_STATS : 0, + }; + + if (flags & CMD_ASYNC) + return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd, NULL); + else + return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, + sizeof(struct iwl_statistics_cmd), + &statistics_cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_statistics_request); + +void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} +EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif); + +void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " + "notification for %s:\n", len, + iwl_legacy_get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); +} +EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif); + +void iwl_legacy_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} +EXPORT_SYMBOL(iwl_legacy_rx_reply_error); + +void iwl_legacy_clear_isr_stats(struct iwl_priv *priv) +{ + memset(&priv->isr_stats, 0, sizeof(priv->isr_stats)); +} + +int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx; + unsigned long flags; + int q; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return -EIO; + } + + if (queue >= AC_NUM) { + IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue); + return 0; + } + + q = AC_NUM - 1 - queue; + + spin_lock_irqsave(&priv->lock, flags); + + for_each_context(priv, ctx) { + ctx->qos_data.def_qos_parm.ac[q].cw_min = + cpu_to_le16(params->cw_min); + ctx->qos_data.def_qos_parm.ac[q].cw_max = + cpu_to_le16(params->cw_max); + 
ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; + ctx->qos_data.def_qos_parm.ac[q].edca_txop = + cpu_to_le16((params->txop * 32)); + + ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; + } + + spin_unlock_irqrestore(&priv->lock, flags); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_mac_conf_tx); + +int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + return priv->ibss_manager == IWL_IBSS_MANAGER; +} +EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon); + +static int +iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + iwl_legacy_connection_init_rx_config(priv, ctx); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + + return iwl_legacy_commit_rxon(priv, ctx); +} + +static int iwl_legacy_setup_interface(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_vif *vif = ctx->vif; + int err; + + lockdep_assert_held(&priv->mutex); + + /* + * This variable will be correct only when there's just + * a single context, but all code using it is for hardware + * that supports only one context. + */ + priv->iw_mode = vif->type; + + ctx->is_active = true; + + err = iwl_legacy_set_mode(priv, ctx); + if (err) { + if (!ctx->always_active) + ctx->is_active = false; + return err; + } + + return 0; +} + +int +iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *tmp, *ctx = NULL; + int err; + + IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", + vif->type, vif->addr); + + mutex_lock(&priv->mutex); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_WARN(priv, "Try to add interface when device not ready\n"); + err = -EINVAL; + goto out; + } + + for_each_context(priv, tmp) { + u32 possible_modes = + tmp->interface_modes | tmp->exclusive_interface_modes; + + if (tmp->vif) { + /* check if this busy context is exclusive */ + if (tmp->exclusive_interface_modes & + BIT(tmp->vif->type)) { + err = -EINVAL; + goto out; + } + continue; + } + + if (!(possible_modes & BIT(vif->type))) + continue; + + /* have maybe usable context w/o interface */ + ctx = tmp; + break; + } + + if (!ctx) { + err = -EOPNOTSUPP; + goto out; + } + + vif_priv->ctx = ctx; + ctx->vif = vif; + + err = iwl_legacy_setup_interface(priv, ctx); + if (!err) + goto out; + + ctx->vif = NULL; + priv->iw_mode = NL80211_IFTYPE_STATION; + out: + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + return err; +} +EXPORT_SYMBOL(iwl_legacy_mac_add_interface); + +static void iwl_legacy_teardown_interface(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool mode_change) +{ + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + lockdep_assert_held(&priv->mutex); + + if (priv->scan_vif == vif) { + iwl_legacy_scan_cancel_timeout(priv, 200); + iwl_legacy_force_scan_end(priv); + } + + if (!mode_change) { + iwl_legacy_set_mode(priv, ctx); + if (!ctx->always_active) + ctx->is_active = false; + } +} + +void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + mutex_lock(&priv->mutex); + + WARN_ON(ctx->vif != vif); + ctx->vif = NULL; + + iwl_legacy_teardown_interface(priv, vif, false); + + memset(priv->bssid, 0, ETH_ALEN); 
+ mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +} +EXPORT_SYMBOL(iwl_legacy_mac_remove_interface); + +int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv) +{ + if (!priv->txq) + priv->txq = kzalloc( + sizeof(struct iwl_tx_queue) * + priv->cfg->base_params->num_of_queues, + GFP_KERNEL); + if (!priv->txq) { + IWL_ERR(priv, "Not enough memory for txq\n"); + return -ENOMEM; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem); + +void iwl_legacy_txq_mem(struct iwl_priv *priv) +{ + kfree(priv->txq); + priv->txq = NULL; +} +EXPORT_SYMBOL(iwl_legacy_txq_mem); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + +#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) + +void iwl_legacy_reset_traffic_log(struct iwl_priv *priv) +{ + priv->tx_traffic_idx = 0; + priv->rx_traffic_idx = 0; + if (priv->tx_traffic) + memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); + if (priv->rx_traffic) + memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE); +} + +int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv) +{ + u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE; + + if (iwlegacy_debug_level & IWL_DL_TX) { + if (!priv->tx_traffic) { + priv->tx_traffic = + kzalloc(traffic_size, GFP_KERNEL); + if (!priv->tx_traffic) + return -ENOMEM; + } + } + if (iwlegacy_debug_level & IWL_DL_RX) { + if (!priv->rx_traffic) { + priv->rx_traffic = + kzalloc(traffic_size, GFP_KERNEL); + if (!priv->rx_traffic) + return -ENOMEM; + } + } + iwl_legacy_reset_traffic_log(priv); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem); + +void iwl_legacy_free_traffic_mem(struct iwl_priv *priv) +{ + kfree(priv->tx_traffic); + priv->tx_traffic = NULL; + + kfree(priv->rx_traffic); + priv->rx_traffic = NULL; +} +EXPORT_SYMBOL(iwl_legacy_free_traffic_mem); + +void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(iwlegacy_debug_level & IWL_DL_TX))) + return; + + if (!priv->tx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = (length > IWL_TRAFFIC_ENTRY_SIZE) + ? IWL_TRAFFIC_ENTRY_SIZE : length; + memcpy((priv->tx_traffic + + (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), + header, len); + priv->tx_traffic_idx = + (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame); + +void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ + __le16 fc; + u16 len; + + if (likely(!(iwlegacy_debug_level & IWL_DL_RX))) + return; + + if (!priv->rx_traffic) + return; + + fc = header->frame_control; + if (ieee80211_is_data(fc)) { + len = (length > IWL_TRAFFIC_ENTRY_SIZE) + ? 
IWL_TRAFFIC_ENTRY_SIZE : length; + memcpy((priv->rx_traffic + + (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)), + header, len); + priv->rx_traffic_idx = + (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; + } +} +EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame); + +const char *iwl_legacy_get_mgmt_string(int cmd) +{ + switch (cmd) { + IWL_CMD(MANAGEMENT_ASSOC_REQ); + IWL_CMD(MANAGEMENT_ASSOC_RESP); + IWL_CMD(MANAGEMENT_REASSOC_REQ); + IWL_CMD(MANAGEMENT_REASSOC_RESP); + IWL_CMD(MANAGEMENT_PROBE_REQ); + IWL_CMD(MANAGEMENT_PROBE_RESP); + IWL_CMD(MANAGEMENT_BEACON); + IWL_CMD(MANAGEMENT_ATIM); + IWL_CMD(MANAGEMENT_DISASSOC); + IWL_CMD(MANAGEMENT_AUTH); + IWL_CMD(MANAGEMENT_DEAUTH); + IWL_CMD(MANAGEMENT_ACTION); + default: + return "UNKNOWN"; + + } +} + +const char *iwl_legacy_get_ctrl_string(int cmd) +{ + switch (cmd) { + IWL_CMD(CONTROL_BACK_REQ); + IWL_CMD(CONTROL_BACK); + IWL_CMD(CONTROL_PSPOLL); + IWL_CMD(CONTROL_RTS); + IWL_CMD(CONTROL_CTS); + IWL_CMD(CONTROL_ACK); + IWL_CMD(CONTROL_CFEND); + IWL_CMD(CONTROL_CFENDACK); + default: + return "UNKNOWN"; + + } +} + +void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv) +{ + memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); + memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); +} + +/* + * if CONFIG_IWLWIFI_LEGACY_DEBUGFS defined, + * iwl_legacy_update_stats function will + * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass + * Use debugFs to display the rx/rx_statistics + * if CONFIG_IWLWIFI_LEGACY_DEBUGFS not being defined, then no MGMT and CTRL + * information will be recorded, but DATA pkt still will be recorded + * for the reason of iwl_led.c need to control the led blinking based on + * number of tx and rx data. + * + */ +void +iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) +{ + struct traffic_stats *stats; + + if (is_tx) + stats = &priv->tx_stats; + else + stats = &priv->rx_stats; + + if (ieee80211_is_mgmt(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + stats->mgmt[MANAGEMENT_ASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): + stats->mgmt[MANAGEMENT_ASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + stats->mgmt[MANAGEMENT_REASSOC_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): + stats->mgmt[MANAGEMENT_REASSOC_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): + stats->mgmt[MANAGEMENT_PROBE_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): + stats->mgmt[MANAGEMENT_PROBE_RESP]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BEACON): + stats->mgmt[MANAGEMENT_BEACON]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ATIM): + stats->mgmt[MANAGEMENT_ATIM]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DISASSOC): + stats->mgmt[MANAGEMENT_DISASSOC]++; + break; + case cpu_to_le16(IEEE80211_STYPE_AUTH): + stats->mgmt[MANAGEMENT_AUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + stats->mgmt[MANAGEMENT_DEAUTH]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACTION): + stats->mgmt[MANAGEMENT_ACTION]++; + break; + } + } else if (ieee80211_is_ctl(fc)) { + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_BACK_REQ): + stats->ctrl[CONTROL_BACK_REQ]++; + break; + case cpu_to_le16(IEEE80211_STYPE_BACK): + stats->ctrl[CONTROL_BACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_PSPOLL): + stats->ctrl[CONTROL_PSPOLL]++; + break; + case cpu_to_le16(IEEE80211_STYPE_RTS): + stats->ctrl[CONTROL_RTS]++; + 
break; + case cpu_to_le16(IEEE80211_STYPE_CTS): + stats->ctrl[CONTROL_CTS]++; + break; + case cpu_to_le16(IEEE80211_STYPE_ACK): + stats->ctrl[CONTROL_ACK]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFEND): + stats->ctrl[CONTROL_CFEND]++; + break; + case cpu_to_le16(IEEE80211_STYPE_CFENDACK): + stats->ctrl[CONTROL_CFENDACK]++; + break; + } + } else { + /* data */ + stats->data_cnt++; + stats->data_bytes += len; + } +} +EXPORT_SYMBOL(iwl_legacy_update_stats); +#endif + +static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!iwl_legacy_is_any_associated(priv)) { + IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n"); + return; + } + /* + * There is no easy and better way to force reset the radio, + * the only known method is switching channel which will force to + * reset and tune the radio. + * Use internal short scan (single channel) operation to should + * achieve this objective. + * Driver should reset the radio when number of consecutive missed + * beacon, or any other uCode error condition detected. + */ + IWL_DEBUG_INFO(priv, "perform radio reset.\n"); + iwl_legacy_internal_short_hw_scan(priv); +} + + +int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external) +{ + struct iwl_force_reset *force_reset; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EINVAL; + + if (mode >= IWL_MAX_FORCE_RESET) { + IWL_DEBUG_INFO(priv, "invalid reset request.\n"); + return -EINVAL; + } + force_reset = &priv->force_reset[mode]; + force_reset->reset_request_count++; + if (!external) { + if (force_reset->last_force_reset_jiffies && + time_after(force_reset->last_force_reset_jiffies + + force_reset->reset_duration, jiffies)) { + IWL_DEBUG_INFO(priv, "force reset rejected\n"); + force_reset->reset_reject_count++; + return -EAGAIN; + } + } + force_reset->reset_success_count++; + force_reset->last_force_reset_jiffies = jiffies; + IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode); + switch (mode) { + case IWL_RF_RESET: + _iwl_legacy_force_rf_reset(priv); + break; + case IWL_FW_RESET: + /* + * if the request is from external(ex: debugfs), + * then always perform the request in regardless the module + * parameter setting + * if the request is from internal (uCode error or driver + * detect failure), then fw_restart module parameter + * need to be check before performing firmware reload + */ + if (!external && !priv->cfg->mod_params->restart_fw) { + IWL_DEBUG_INFO(priv, "Cancel firmware reload based on " + "module parameter setting\n"); + break; + } + IWL_ERR(priv, "On demand firmware reload\n"); + /* Set the FW error flag -- cleared on iwl_down */ + set_bit(STATUS_FW_ERROR, &priv->status); + wake_up_interruptible(&priv->wait_command_queue); + /* + * Keep the restart process from trying to send host + * commands by clearing the INIT status bit + */ + clear_bit(STATUS_READY, &priv->status); + queue_work(priv->workqueue, &priv->restart); + break; + } + return 0; +} + +int +iwl_legacy_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + struct iwl_rxon_context *tmp; + u32 interface_modes; + int err; + + newtype = ieee80211_iftype_p2p(newtype, newp2p); + + mutex_lock(&priv->mutex); + + interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes; + + if (!(interface_modes & BIT(newtype))) { + err = 
-EBUSY; + goto out; + } + + if (ctx->exclusive_interface_modes & BIT(newtype)) { + for_each_context(priv, tmp) { + if (ctx == tmp) + continue; + + if (!tmp->vif) + continue; + + /* + * The current mode switch would be exclusive, but + * another context is active ... refuse the switch. + */ + err = -EBUSY; + goto out; + } + } + + /* success */ + iwl_legacy_teardown_interface(priv, vif, true); + vif->type = newtype; + err = iwl_legacy_setup_interface(priv, ctx); + WARN_ON(err); + /* + * We've switched internally, but submitting to the + * device may have failed for some reason. Mask this + * error, because otherwise mac80211 will not switch + * (and set the interface type back) and we'll be + * out of sync with it. + */ + err = 0; + + out: + mutex_unlock(&priv->mutex); + return err; +} +EXPORT_SYMBOL(iwl_legacy_mac_change_interface); + +/* + * On every watchdog tick we check (latest) time stamp. If it does not + * change during timeout period and queue is not empty we reset firmware. + */ +static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt) +{ + struct iwl_tx_queue *txq = &priv->txq[cnt]; + struct iwl_queue *q = &txq->q; + unsigned long timeout; + int ret; + + if (q->read_ptr == q->write_ptr) { + txq->time_stamp = jiffies; + return 0; + } + + timeout = txq->time_stamp + + msecs_to_jiffies(priv->cfg->base_params->wd_timeout); + + if (time_after(jiffies, timeout)) { + IWL_ERR(priv, "Queue %d stuck for %u ms.\n", + q->id, priv->cfg->base_params->wd_timeout); + ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false); + return (ret == -EAGAIN) ? 0 : 1; + } + + return 0; +} + +/* + * Making watchdog tick be a quarter of timeout assure we will + * discover the queue hung between timeout and 1.25*timeout + */ +#define IWL_WD_TICK(timeout) ((timeout) / 4) + +/* + * Watchdog timer callback, we check each tx queue for stuck, if if hung + * we reset the firmware. If everything is fine just rearm the timer. 
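A compact model of the stuck-queue test and the tick = timeout/4 choice described above; the millisecond bookkeeping here is simplified (the driver uses jiffies), but the detection-latency argument is the same:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified per-queue bookkeeping used by the watchdog. */
	struct txq {
		unsigned int read_ptr;
		unsigned int write_ptr;
		unsigned long time_stamp;	/* last progress seen, in ms */
	};

	#define WD_TIMEOUT_MS	2000
	/* Ticking at a quarter of the timeout bounds detection latency to
	 * between 1.0 and 1.25 times the timeout. */
	#define WD_TICK_MS	(WD_TIMEOUT_MS / 4)

	static bool queue_stuck(const struct txq *q, unsigned long now_ms)
	{
		if (q->read_ptr == q->write_ptr)	/* empty: never stuck */
			return false;
		return now_ms > q->time_stamp + WD_TIMEOUT_MS;
	}

	int main(void)
	{
		struct txq q = { .read_ptr = 3, .write_ptr = 7, .time_stamp = 1000 };

		printf("tick every %d ms, stuck at t=3500: %d\n",
		       WD_TICK_MS, queue_stuck(&q, 3500));
		return 0;
	}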
+ */ +void iwl_legacy_bg_watchdog(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + int cnt; + unsigned long timeout; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + timeout = priv->cfg->base_params->wd_timeout; + if (timeout == 0) + return; + + /* monitor and check for stuck cmd queue */ + if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue)) + return; + + /* monitor and check for other stuck queues */ + if (iwl_legacy_is_any_associated(priv)) { + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + /* skip as we already checked the command queue */ + if (cnt == priv->cmd_queue) + continue; + if (iwl_legacy_check_stuck_queue(priv, cnt)) + return; + } + } + + mod_timer(&priv->watchdog, jiffies + + msecs_to_jiffies(IWL_WD_TICK(timeout))); +} +EXPORT_SYMBOL(iwl_legacy_bg_watchdog); + +void iwl_legacy_setup_watchdog(struct iwl_priv *priv) +{ + unsigned int timeout = priv->cfg->base_params->wd_timeout; + + if (timeout) + mod_timer(&priv->watchdog, + jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); + else + del_timer(&priv->watchdog); +} +EXPORT_SYMBOL(iwl_legacy_setup_watchdog); + +/* + * extended beacon time format + * time in usec will be changed into a 32-bit value in extended:internal format + * the extended part is the beacon counts + * the internal part is the time in usec within one beacon interval + */ +u32 +iwl_legacy_usecs_to_beacons(struct iwl_priv *priv, + u32 usec, u32 beacon_interval) +{ + u32 quot; + u32 rem; + u32 interval = beacon_interval * TIME_UNIT; + + if (!interval || !usec) + return 0; + + quot = (usec / interval) & + (iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits) >> + priv->hw_params.beacon_time_tsf_bits); + rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + + return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; +} +EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons); + +/* base is usually what we get from ucode with each received frame, + * the same as HW timer counter counting down + */ +__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval) +{ + u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv, + priv->hw_params.beacon_time_tsf_bits); + u32 interval = beacon_interval * TIME_UNIT; + u32 res = (base & iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits)) + + (addon & iwl_legacy_beacon_time_mask_high(priv, + priv->hw_params.beacon_time_tsf_bits)); + + if (base_low > addon_low) + res += base_low - addon_low; + else if (base_low < addon_low) { + res += interval + base_low - addon_low; + res += (1 << priv->hw_params.beacon_time_tsf_bits); + } else + res += (1 << priv->hw_params.beacon_time_tsf_bits); + + return cpu_to_le32(res); +} +EXPORT_SYMBOL(iwl_legacy_add_beacon_time); + +#ifdef CONFIG_PM + +int iwl_legacy_pci_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_priv *priv = pci_get_drvdata(pdev); + + /* + * This function is called when system goes into suspend state + * mac80211 will call iwl_mac_stop() from the mac80211 suspend function + * first but since iwl_mac_stop() has no knowledge of who the caller is, + * it will not call apm_ops.stop() to stop the DMA operation. + * Calling apm_ops.stop here to make sure we stop the DMA. 
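The extended beacon time helpers above pack a beacon count into the high bits and the in-interval microseconds into the low bits. A toy version with illustrative TSF_BITS/TIME_UNIT values (not the hardware's):

	#include <stdint.h>
	#include <stdio.h>

	#define TSF_BITS	22
	#define TIME_UNIT_US	1024
	#define LOW_MASK	((1u << TSF_BITS) - 1)

	/* usec -> (beacon count << TSF_BITS) | usec within current interval */
	static uint32_t usecs_to_extended(uint32_t usec, uint32_t beacon_interval_tu)
	{
		uint32_t interval = beacon_interval_tu * TIME_UNIT_US;

		if (!interval || !usec)
			return 0;
		return ((usec / interval) << TSF_BITS) |
		       ((usec % interval) & LOW_MASK);
	}

	int main(void)
	{
		/* 250000 us with a 100 TU (102400 us) interval:
		 * 2 whole beacons plus 45200 us into the third */
		uint32_t v = usecs_to_extended(250000, 100);

		printf("beacons=%u rem=%u us\n", v >> TSF_BITS, v & LOW_MASK);
		return 0;
	}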
+ */ + iwl_legacy_apm_stop(priv); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_pci_suspend); + +int iwl_legacy_pci_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_priv *priv = pci_get_drvdata(pdev); + bool hw_rfkill = false; + + /* + * We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + iwl_legacy_enable_interrupts(priv); + + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rfkill = true; + + if (hw_rfkill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill); + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_pci_resume); + +const struct dev_pm_ops iwl_legacy_pm_ops = { + .suspend = iwl_legacy_pci_suspend, + .resume = iwl_legacy_pci_resume, + .freeze = iwl_legacy_pci_suspend, + .thaw = iwl_legacy_pci_resume, + .poweroff = iwl_legacy_pci_suspend, + .restore = iwl_legacy_pci_resume, +}; +EXPORT_SYMBOL(iwl_legacy_pm_ops); + +#endif /* CONFIG_PM */ + +static void +iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (!ctx->is_active) + return; + + ctx->qos_data.def_qos_parm.qos_flags = 0; + + if (ctx->qos_data.qos_active) + ctx->qos_data.def_qos_parm.qos_flags |= + QOS_PARAM_FLG_UPDATE_EDCA_MSK; + + if (ctx->ht.enabled) + ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; + + IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", + ctx->qos_data.qos_active, + ctx->qos_data.def_qos_parm.qos_flags); + + iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd, + sizeof(struct iwl_qosparam_cmd), + &ctx->qos_data.def_qos_parm, NULL); +} + +/** + * iwl_legacy_mac_config - mac80211 config callback + */ +int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = conf->channel; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct iwl_rxon_context *ctx; + unsigned long flags = 0; + int ret = 0; + u16 ch; + int scan_active = 0; + bool ht_changed[NUM_IWL_RXON_CTX] = {}; + + if (WARN_ON(!priv->cfg->ops->legacy)) + return -EOPNOTSUPP; + + mutex_lock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", + channel->hw_value, changed); + + if (unlikely(!priv->cfg->mod_params->disable_hw_scan && + test_bit(STATUS_SCANNING, &priv->status))) { + scan_active = 1; + IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); + } + + if (changed & (IEEE80211_CONF_CHANGE_SMPS | + IEEE80211_CONF_CHANGE_CHANNEL)) { + /* mac80211 uses static for non-HT which is what we want */ + priv->current_ht_config.smps = conf->smps_mode; + + /* + * Recalculate chain counts. + * + * If monitor mode is enabled then mac80211 will + * set up the SM PS mode to OFF if an HT channel is + * configured. 
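The resume path above samples the hardware rfkill line from a control register and mirrors it into the driver's status bitmap before reporting it to mac80211. A simplified sketch of that mirroring, with an invented bit layout (the real CSR register differs):

	#include <stdbool.h>
	#include <stdio.h>

	#define GP_CNTRL_HW_RF_KILL_SW	(1u << 27)	/* illustrative position */
	#define STATUS_RF_KILL_HW	0

	static unsigned long status_bits;

	/* Bit set in the register means the radio is allowed to operate. */
	static void sync_rfkill(unsigned int gp_cntrl)
	{
		bool hw_rfkill = !(gp_cntrl & GP_CNTRL_HW_RF_KILL_SW);

		if (hw_rfkill)
			status_bits |= 1ul << STATUS_RF_KILL_HW;
		else
			status_bits &= ~(1ul << STATUS_RF_KILL_HW);

		printf("hw rfkill %s\n", hw_rfkill ? "asserted" : "clear");
	}

	int main(void)
	{
		sync_rfkill(0);				/* kill switch engaged */
		sync_rfkill(GP_CNTRL_HW_RF_KILL_SW);	/* radio allowed */
		return 0;
	}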
+ */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + for_each_context(priv, ctx) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + /* during scanning mac80211 will delay channel setting until + * scan finish with changed = 0 + */ + if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { + if (scan_active) + goto set_ch_out; + + ch = channel->hw_value; + ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + + spin_lock_irqsave(&priv->lock, flags); + + for_each_context(priv, ctx) { + /* Configure HT40 channels */ + if (ctx->ht.enabled != conf_is_ht(conf)) { + ctx->ht.enabled = conf_is_ht(conf); + ht_changed[ctx->ctxid] = true; + } + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } + } else + ctx->ht.is_40mhz = false; + + /* + * Default to no protection. Protection mode will + * later be set from BSS config in iwl_ht_conf + */ + ctx->ht.protection = + IEEE80211_HT_OP_MODE_PROTECTION_NONE; + + /* if we are switching from ht to 2.4 clear flags + * from any ht related info since 2.4 does not + * support ht */ + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_legacy_set_rxon_channel(priv, channel, ctx); + iwl_legacy_set_rxon_ht(priv, ht_conf); + + iwl_legacy_set_flags_for_band(priv, ctx, channel->band, + ctx->vif); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + if (priv->cfg->ops->legacy->update_bcast_stations) + ret = + priv->cfg->ops->legacy->update_bcast_stations(priv); + + set_ch_out: + /* The list of supported rates and rate mask can be different + * for each band; since the band may have changed, reset + * the rate mask to what mac80211 lists */ + iwl_legacy_set_rate(priv); + } + + if (changed & (IEEE80211_CONF_CHANGE_PS | + IEEE80211_CONF_CHANGE_IDLE)) { + ret = iwl_legacy_power_update_mode(priv, false); + if (ret) + IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); + } + + if (changed & IEEE80211_CONF_CHANGE_POWER) { + IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", + priv->tx_power_user_lmt, conf->power_level); + + iwl_legacy_set_tx_power(priv, conf->power_level, false); + } + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + goto out; + } + + if (scan_active) + goto out; + + for_each_context(priv, ctx) { + if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging))) + iwl_legacy_commit_rxon(priv, ctx); + else + IWL_DEBUG_INFO(priv, + "Not re-sending same RXON configuration.\n"); + if (ht_changed[ctx->ctxid]) + iwl_legacy_update_qos(priv, ctx); + } + +out: + IWL_DEBUG_MAC80211(priv, "leave\n"); + mutex_unlock(&priv->mutex); + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_config); + +void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + /* IBSS can only be the IWL_RXON_CTX_BSS context */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + if (WARN_ON(!priv->cfg->ops->legacy)) + return; + + mutex_lock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "enter\n"); + + spin_lock_irqsave(&priv->lock, flags); + 
memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); + spin_unlock_irqrestore(&priv->lock, flags); + + spin_lock_irqsave(&priv->lock, flags); + + /* new association get rid of ibss beacon skb */ + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + + priv->beacon_skb = NULL; + + priv->timestamp = 0; + + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_legacy_scan_cancel_timeout(priv, 100); + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); + mutex_unlock(&priv->mutex); + return; + } + + /* we are restarting association process + * clear RXON_FILTER_ASSOC_MSK bit + */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_legacy_commit_rxon(priv, ctx); + + iwl_legacy_set_rate(priv); + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} +EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf); + +static void iwl_legacy_ht_conf(struct iwl_priv *priv, + struct ieee80211_vif *vif) +{ + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct ieee80211_sta *sta; + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + IWL_DEBUG_ASSOC(priv, "enter:\n"); + + if (!ctx->ht.enabled) + return; + + ctx->ht.protection = + bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; + ctx->ht.non_gf_sta_present = + !!(bss_conf->ht_operation_mode & + IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); + + ht_conf->single_chain_sufficient = false; + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (sta) { + struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; + int maxstreams; + + maxstreams = (ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) + >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + maxstreams += 1; + + if ((ht_cap->mcs.rx_mask[1] == 0) && + (ht_cap->mcs.rx_mask[2] == 0)) + ht_conf->single_chain_sufficient = true; + if (maxstreams <= 1) + ht_conf->single_chain_sufficient = true; + } else { + /* + * If at all, this can only happen through a race + * when the AP disconnects us while we're still + * setting up the connection, in that case mac80211 + * will soon tell us about that. 
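The association handling above derives the peer's maximum TX stream count from the HT MCS tx_params field and falls back to a single RX chain when the peer advertises only one stream. A standalone sketch of that decision (mask/shift values shown for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define HT_MCS_TX_MAX_STREAMS_MASK	0x0C
	#define HT_MCS_TX_MAX_STREAMS_SHIFT	2

	/*
	 * One RX chain is enough if the peer neither advertises a second or
	 * third RX MCS stream nor can transmit more than one spatial stream.
	 */
	static int single_chain_sufficient(uint8_t tx_params, const uint8_t rx_mask[3])
	{
		int maxstreams = ((tx_params & HT_MCS_TX_MAX_STREAMS_MASK) >>
				  HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;

		if (rx_mask[1] == 0 && rx_mask[2] == 0)
			return 1;
		return maxstreams <= 1;
	}

	int main(void)
	{
		uint8_t rx[3] = { 0xff, 0x00, 0x00 };	/* MCS 0-7 only */

		printf("%d\n", single_chain_sufficient(0x00, rx));
		return 0;
	}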
+ */ + ht_conf->single_chain_sufficient = true; + } + rcu_read_unlock(); + break; + case NL80211_IFTYPE_ADHOC: + ht_conf->single_chain_sufficient = true; + break; + default: + break; + } + + IWL_DEBUG_ASSOC(priv, "leave\n"); +} + +static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv, + struct ieee80211_vif *vif) +{ + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + + /* + * inform the ucode that there is no longer an + * association and that no more packets should be + * sent + */ + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + ctx->staging.assoc_id = 0; + iwl_legacy_commit_rxon(priv, ctx); +} + +static void iwl_legacy_beacon_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_priv *priv = hw->priv; + unsigned long flags; + __le64 timestamp; + struct sk_buff *skb = ieee80211_beacon_get(hw, vif); + + if (!skb) + return; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_ctx) { + IWL_ERR(priv, "update beacon but no beacon context!\n"); + dev_kfree_skb(skb); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + + if (priv->beacon_skb) + dev_kfree_skb(priv->beacon_skb); + + priv->beacon_skb = skb; + + timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; + priv->timestamp = le64_to_cpu(timestamp); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + spin_unlock_irqrestore(&priv->lock, flags); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); + return; + } + + priv->cfg->ops->legacy->post_associate(priv); +} + +void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif); + int ret; + + if (WARN_ON(!priv->cfg->ops->legacy)) + return; + + IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes); + + if (!iwl_legacy_is_alive(priv)) + return; + + mutex_lock(&priv->mutex); + + if (changes & BSS_CHANGED_QOS) { + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ctx->qos_data.qos_active = bss_conf->qos; + iwl_legacy_update_qos(priv, ctx); + spin_unlock_irqrestore(&priv->lock, flags); + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + /* + * the add_interface code must make sure we only ever + * have a single interface that could be beaconing at + * any time. + */ + if (vif->bss_conf.enable_beacon) + priv->beacon_ctx = ctx; + else + priv->beacon_ctx = NULL; + } + + if (changes & BSS_CHANGED_BSSID) { + IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid); + + /* + * If there is currently a HW scan going on in the + * background then we need to cancel it else the RXON + * below/in post_associate will fail. + */ + if (iwl_legacy_scan_cancel_timeout(priv, 100)) { + IWL_WARN(priv, + "Aborted scan still in progress after 100ms\n"); + IWL_DEBUG_MAC80211(priv, + "leaving - scan abort failed.\n"); + mutex_unlock(&priv->mutex); + return; + } + + /* mac80211 only sets assoc when in STATION mode */ + if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) { + memcpy(ctx->staging.bssid_addr, + bss_conf->bssid, ETH_ALEN); + + /* currently needed in a few places */ + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + } else { + ctx->staging.filter_flags &= + ~RXON_FILTER_ASSOC_MSK; + } + + } + + /* + * This needs to be after setting the BSSID in case + * mac80211 decides to do both changes at once because + * it will invoke post_associate. 
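iwl_legacy_beacon_update() above copies the TSF straight out of the beacon frame mac80211 built. In a beacon the 8-byte little-endian timestamp follows the 24-byte 802.11 header; a byte-wise, endian-independent extraction might look like this (sketch only):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Read the 64-bit little-endian TSF at offset 24 of a beacon frame. */
	static uint64_t beacon_tsf(const uint8_t *frame, size_t len)
	{
		uint64_t tsf = 0;
		int i;

		if (len < 24 + 8)
			return 0;
		for (i = 7; i >= 0; i--)
			tsf = (tsf << 8) | frame[24 + i];
		return tsf;
	}

	int main(void)
	{
		uint8_t frame[36] = { 0 };
		/* pretend TSF = 0x0102030405060708, stored little-endian */
		uint8_t tsf_le[8] = { 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 };

		memcpy(frame + 24, tsf_le, 8);
		printf("tsf=%llx\n", (unsigned long long)beacon_tsf(frame, sizeof(frame)));
		return 0;
	}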
+ */ + if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON) + iwl_legacy_beacon_update(hw, vif); + + if (changes & BSS_CHANGED_ERP_PREAMBLE) { + IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", + bss_conf->use_short_preamble); + if (bss_conf->use_short_preamble) + ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; + else + ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; + } + + if (changes & BSS_CHANGED_ERP_CTS_PROT) { + IWL_DEBUG_MAC80211(priv, + "ERP_CTS %d\n", bss_conf->use_cts_prot); + if (bss_conf->use_cts_prot && + (priv->band != IEEE80211_BAND_5GHZ)) + ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; + else + ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + if (bss_conf->use_cts_prot) + ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; + else + ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; + } + + if (changes & BSS_CHANGED_BASIC_RATES) { + /* XXX use this information + * + * To do that, remove code from iwl_legacy_set_rate() and put something + * like this here: + * + if (A-band) + ctx->staging.ofdm_basic_rates = + bss_conf->basic_rates; + else + ctx->staging.ofdm_basic_rates = + bss_conf->basic_rates >> 4; + ctx->staging.cck_basic_rates = + bss_conf->basic_rates & 0xF; + */ + } + + if (changes & BSS_CHANGED_HT) { + iwl_legacy_ht_conf(priv, vif); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + if (changes & BSS_CHANGED_ASSOC) { + IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); + if (bss_conf->assoc) { + priv->timestamp = bss_conf->timestamp; + + if (!iwl_legacy_is_rfkill(priv)) + priv->cfg->ops->legacy->post_associate(priv); + } else + iwl_legacy_set_no_assoc(priv, vif); + } + + if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) { + IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n", + changes); + ret = iwl_legacy_send_rxon_assoc(priv, ctx); + if (!ret) { + /* Sync active_rxon with latest change. */ + memcpy((void *)&ctx->active, + &ctx->staging, + sizeof(struct iwl_legacy_rxon_cmd)); + } + } + + if (changes & BSS_CHANGED_BEACON_ENABLED) { + if (vif->bss_conf.enable_beacon) { + memcpy(ctx->staging.bssid_addr, + bss_conf->bssid, ETH_ALEN); + memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); + priv->cfg->ops->legacy->config_ap(priv); + } else + iwl_legacy_set_no_assoc(priv, vif); + } + + if (changes & BSS_CHANGED_IBSS) { + ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif, + bss_conf->ibss_joined); + if (ret) + IWL_ERR(priv, "failed to %s IBSS station %pM\n", + bss_conf->ibss_joined ? "add" : "remove", + bss_conf->bssid); + } + + mutex_unlock(&priv->mutex); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} +EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed); + +irqreturn_t iwl_legacy_isr(int irq, void *data) +{ + struct iwl_priv *priv = data; + u32 inta, inta_mask; + u32 inta_fh; + unsigned long flags; + if (!priv) + return IRQ_NONE; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable (but don't clear!) interrupts here to avoid + * back-to-back ISRs and sporadic interrupts from our NIC. + * If we have something to service, the tasklet will re-enable ints. + * If we *don't* have something, we'll re-enable before leaving here. */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* Discover which interrupts are active/pending */ + inta = iwl_read32(priv, CSR_INT); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + + /* Ignore interrupt if there's nothing in NIC to service. 
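+	 * (i.e. both CSR_INT and CSR_FH_INT_STATUS read back as zero).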
+ * This may be due to IRQ shared with another device, + * or due to sporadic interrupts thrown from our NIC. */ + if (!inta && !inta_fh) { + IWL_DEBUG_ISR(priv, + "Ignore interrupt, inta == 0, inta_fh == 0\n"); + goto none; + } + + if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { + /* Hardware disappeared. It might have already raised + * an interrupt */ + IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta); + goto unplugged; + } + + IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + + inta &= ~CSR_INT_BIT_SCD; + + /* iwl_irq_tasklet() will service interrupts and re-enable them */ + if (likely(inta || inta_fh)) + tasklet_schedule(&priv->irq_tasklet); + +unplugged: + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_HANDLED; + +none: + /* re-enable interrupts here since we don't have anything to service. */ + /* only Re-enable if diabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_legacy_enable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + return IRQ_NONE; +} +EXPORT_SYMBOL(iwl_legacy_isr); + +/* + * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this + * function. + */ +void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags) +{ + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { + *tx_flags |= TX_CMD_FLG_RTS_MSK; + *tx_flags &= ~TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + if (!ieee80211_is_mgmt(fc)) + return; + + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_AUTH): + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + break; + } + } else if (info->control.rates[0].flags & + IEEE80211_TX_RC_USE_CTS_PROTECT) { + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + } +} +EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection); diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h new file mode 100644 index 000000000000..f03b463e4378 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-core.h @@ -0,0 +1,646 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. 
+ * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_legacy_core_h__ +#define __iwl_legacy_core_h__ + +/************************ + * forward declarations * + ************************/ +struct iwl_host_cmd; +struct iwl_cmd; + + +#define IWLWIFI_VERSION "in-tree:" +#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" +#define DRV_AUTHOR "<ilw@linux.intel.com>" + +#define IWL_PCI_DEVICE(dev, subdev, cfg) \ + .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = (subdev), \ + .driver_data = (kernel_ulong_t)&(cfg) + +#define TIME_UNIT 1024 + +#define IWL_SKU_G 0x1 +#define IWL_SKU_A 0x2 +#define IWL_SKU_N 0x8 + +#define IWL_CMD(x) case x: return #x + +struct iwl_hcmd_ops { + int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); + int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx); + void (*set_rxon_chain)(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +}; + +struct iwl_hcmd_utils_ops { + u16 (*get_hcmd_size)(u8 cmd_id, u16 len); + u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd, + u8 *data); + int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); + void (*post_scan)(struct iwl_priv *priv); +}; + +struct iwl_apm_ops { + int (*init)(struct iwl_priv *priv); + void (*config)(struct iwl_priv *priv); +}; + +struct iwl_debugfs_ops { + ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); + ssize_t (*general_stats_read)(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos); +}; + +struct iwl_temp_ops { + void (*temperature)(struct iwl_priv *priv); +}; + +struct iwl_lib_ops { + /* set hw dependent parameters */ + int (*set_hw_params)(struct iwl_priv *priv); + /* Handling TX */ + void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + u16 byte_cnt); + int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, + u16 len, u8 reset, u8 pad); + void (*txq_free_tfd)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + int (*txq_init)(struct iwl_priv *priv, + struct iwl_tx_queue *txq); + /* setup Rx handler */ + void (*rx_handler_setup)(struct iwl_priv *priv); + /* alive notification after init uCode load */ + void (*init_alive_start)(struct iwl_priv *priv); + /* check validity of rtc data address */ + int (*is_valid_rtc_data_addr)(u32 addr); + /* 1st ucode load */ + int (*load_ucode)(struct iwl_priv *priv); + int (*dump_nic_event_log)(struct iwl_priv *priv, + bool full_log, char **buf, bool display); + void (*dump_nic_error_log)(struct iwl_priv *priv); + int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display); + int (*set_channel_switch)(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch); + /* power management */ + struct iwl_apm_ops apm_ops; + + /* power */ + int (*send_tx_power) (struct iwl_priv *priv); + void (*update_chain_flags)(struct iwl_priv *priv); + + /* eeprom operations (as defined in iwl-eeprom.h) */ + struct iwl_eeprom_ops eeprom_ops; + + /* temperature */ + struct iwl_temp_ops temp_ops; + /* check for plcp health */ + bool (*check_plcp_health)(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); + + struct iwl_debugfs_ops debugfs_ops; + +}; + +struct iwl_led_ops { + int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); +}; + +struct iwl_legacy_ops { + void 
(*post_associate)(struct iwl_priv *priv); + void (*config_ap)(struct iwl_priv *priv); + /* station management */ + int (*update_bcast_stations)(struct iwl_priv *priv); + int (*manage_ibss_station)(struct iwl_priv *priv, + struct ieee80211_vif *vif, bool add); +}; + +struct iwl_ops { + const struct iwl_lib_ops *lib; + const struct iwl_hcmd_ops *hcmd; + const struct iwl_hcmd_utils_ops *utils; + const struct iwl_led_ops *led; + const struct iwl_nic_ops *nic; + const struct iwl_legacy_ops *legacy; + const struct ieee80211_ops *ieee80211_ops; +}; + +struct iwl_mod_params { + int sw_crypto; /* def: 0 = using hardware encryption */ + int disable_hw_scan; /* def: 0 = use h/w scan */ + int num_of_queues; /* def: HW dependent */ + int disable_11n; /* def: 0 = 11n capabilities enabled */ + int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ + int antenna; /* def: 0 = both antennas (use diversity) */ + int restart_fw; /* def: 1 = restart firmware */ +}; + +/* + * @led_compensation: compensate on the led on/off time per HW according + * to the deviation to achieve the desired led frequency. + * The detail algorithm is described in iwl-led.c + * @chain_noise_num_beacons: number of beacons used to compute chain noise + * @plcp_delta_threshold: plcp error rate threshold used to trigger + * radio tuning when there is a high receiving plcp error rate + * @wd_timeout: TX queues watchdog timeout + * @temperature_kelvin: temperature report by uCode in kelvin + * @max_event_log_size: size of event log buffer size for ucode event logging + * @ucode_tracing: support ucode continuous tracing + * @sensitivity_calib_by_driver: driver has the capability to perform + * sensitivity calibration operation + * @chain_noise_calib_by_driver: driver has the capability to perform + * chain noise calibration operation + */ +struct iwl_base_params { + int eeprom_size; + int num_of_queues; /* def: HW dependent */ + int num_of_ampdu_queues;/* def: HW dependent */ + /* for iwl_legacy_apm_init() */ + u32 pll_cfg_val; + bool set_l0s; + bool use_bsm; + + u16 led_compensation; + int chain_noise_num_beacons; + u8 plcp_delta_threshold; + unsigned int wd_timeout; + bool temperature_kelvin; + u32 max_event_log_size; + const bool ucode_tracing; + const bool sensitivity_calib_by_driver; + const bool chain_noise_calib_by_driver; +}; + +/** + * struct iwl_cfg + * @fw_name_pre: Firmware filename prefix. The api version and extension + * (.ucode) will be added to filename before loading from disk. The + * filename is constructed as fw_name_pre<api>.ucode. + * @ucode_api_max: Highest version of uCode API supported by driver. + * @ucode_api_min: Lowest version of uCode API supported by driver. + * @scan_antennas: available antenna for scan operation + * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) + * + * We enable the driver to be backward compatible wrt API version. The + * driver specifies which APIs it supports (with @ucode_api_max being the + * highest and @ucode_api_min the lowest). Firmware will only be loaded if + * it has a supported API version. The firmware's API version will be + * stored in @iwl_priv, enabling the driver to make runtime changes based + * on firmware version used. + * + * For example, + * if (IWL_UCODE_API(priv->ucode_ver) >= 2) { + * Driver interacts with Firmware API version >= 2. + * } else { + * Driver interacts with Firmware API version 1. + * } + * + * The ideal usage of this infrastructure is to treat a new ucode API + * release as a new hardware revision. 
That is, through utilizing the + * iwl_hcmd_utils_ops etc. we accommodate different command structures + * and flows between hardware versions as well as their API + * versions. + * + */ +struct iwl_cfg { + /* params specific to an individual device within a device family */ + const char *name; + const char *fw_name_pre; + const unsigned int ucode_api_max; + const unsigned int ucode_api_min; + u8 valid_tx_ant; + u8 valid_rx_ant; + unsigned int sku; + u16 eeprom_ver; + u16 eeprom_calib_ver; + const struct iwl_ops *ops; + /* module based parameters which can be set from modprobe cmd */ + const struct iwl_mod_params *mod_params; + /* params not likely to change within a device family */ + struct iwl_base_params *base_params; + /* params likely to change within a device family */ + u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; + u8 scan_tx_antennas[IEEE80211_NUM_BANDS]; + enum iwl_led_mode led_mode; +}; + +/*************************** + * L i b * + ***************************/ + +struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg); +int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, + const struct ieee80211_tx_queue_params *params); +int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw); +void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + int hw_decrypt); +int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl_legacy_full_rxon_required(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +int iwl_legacy_set_rxon_channel(struct iwl_priv *priv, + struct ieee80211_channel *ch, + struct iwl_rxon_context *ctx); +void iwl_legacy_set_flags_for_band(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + enum ieee80211_band band, + struct ieee80211_vif *vif); +u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv, + enum ieee80211_band band); +void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, + struct iwl_ht_config *ht_conf); +bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct ieee80211_sta_ht_cap *ht_cap); +void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_set_rate(struct iwl_priv *priv); +int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats); +void iwl_legacy_irq_handle_error(struct iwl_priv *priv); +int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype newtype, bool newp2p); +int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv); +void iwl_legacy_txq_mem(struct iwl_priv *priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv); +void iwl_legacy_free_traffic_mem(struct iwl_priv *priv); +void iwl_legacy_reset_traffic_log(struct iwl_priv *priv); +void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header); +void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header); +const char *iwl_legacy_get_mgmt_string(int cmd); +const char *iwl_legacy_get_ctrl_string(int cmd); +void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv); +void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, + u16 len); +#else 
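+/* CONFIG_IWLWIFI_LEGACY_DEBUGFS not set: the traffic-log helpers below
+ * compile down to no-op inline stubs. */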
+static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv) +{ + return 0; +} +static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv) +{ +} +static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv) +{ +} +static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ +} +static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv, + u16 length, struct ieee80211_hdr *header) +{ +} +static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, + __le16 fc, u16 len) +{ +} +#endif +/***************************************************** + * RX handlers. + * **************************************************/ +void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + +/***************************************************** +* RX +******************************************************/ +void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv); +void iwl_legacy_cmd_queue_free(struct iwl_priv *priv); +int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv); +void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q); +int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q); +void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +/* Handlers */ +void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); +void iwl_legacy_recover_from_statistics(struct iwl_priv *priv, + struct iwl_rx_packet *pkt); +void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success); +void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); + +/* TX helpers */ + +/***************************************************** +* TX +******************************************************/ +void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, + struct iwl_tx_queue *txq); +int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id); +void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int slots_num, u32 txq_id); +void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id); +void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id); +void iwl_legacy_setup_watchdog(struct iwl_priv *priv); +/***************************************************** + * TX power + ****************************************************/ +int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force); + +/******************************************************************************* + * Rate + ******************************************************************************/ + +u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); + +/******************************************************************************* + * Scanning + ******************************************************************************/ +void iwl_legacy_init_scan_params(struct iwl_priv *priv); +int iwl_legacy_scan_cancel(struct iwl_priv *priv); +int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); +void iwl_legacy_force_scan_end(struct iwl_priv *priv); +int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct 
cfg80211_scan_request *req); +void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv); +int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external); +u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv, + struct ieee80211_mgmt *frame, + const u8 *ta, const u8 *ie, int ie_len, int left); +void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv); +u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + u8 n_probes); +u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + struct ieee80211_vif *vif); +void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv); +void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv); + +/* For faster active scanning, scan will move to the next channel if fewer than + * PLCP_QUIET_THRESH packets are heard on this channel within + * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell + * time if it's a quiet channel (nothing responded to our probe, and there's + * no other traffic). + * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ +#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ +#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ + +#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7) + +/***************************************************** + * S e n d i n g H o s t C o m m a n d s * + *****************************************************/ + +const char *iwl_legacy_get_cmd_string(u8 cmd); +int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv, + struct iwl_host_cmd *cmd); +int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); +int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, + u16 len, const void *data); +int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len, + const void *data, + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt)); + +int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); + + +/***************************************************** + * PCI * + *****************************************************/ + +static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv) +{ + int pos; + u16 pci_lnk_ctl; + pos = pci_find_capability(priv->pci_dev, PCI_CAP_ID_EXP); + pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); + return pci_lnk_ctl; +} + +void iwl_legacy_bg_watchdog(unsigned long data); +u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv, + u32 usec, u32 beacon_interval); +__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base, + u32 addon, u32 beacon_interval); + +#ifdef CONFIG_PM +int iwl_legacy_pci_suspend(struct device *device); +int iwl_legacy_pci_resume(struct device *device); +extern const struct dev_pm_ops iwl_legacy_pm_ops; + +#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops) + +#else /* !CONFIG_PM */ + +#define IWL_LEGACY_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +/***************************************************** +* Error Handling Debugging +******************************************************/ +void iwl4965_dump_nic_error_log(struct iwl_priv *priv); +int iwl4965_dump_nic_event_log(struct iwl_priv *priv, + bool full_log, char **buf, bool display); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +#else +static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ +} +#endif + +void 
iwl_legacy_clear_isr_stats(struct iwl_priv *priv); + +/***************************************************** +* GEOS +******************************************************/ +int iwl_legacy_init_geos(struct iwl_priv *priv); +void iwl_legacy_free_geos(struct iwl_priv *priv); + +/*************** DRIVER STATUS FUNCTIONS *****/ + +#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ +/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */ +#define STATUS_INT_ENABLED 2 +#define STATUS_RF_KILL_HW 3 +#define STATUS_CT_KILL 4 +#define STATUS_INIT 5 +#define STATUS_ALIVE 6 +#define STATUS_READY 7 +#define STATUS_TEMPERATURE 8 +#define STATUS_GEO_CONFIGURED 9 +#define STATUS_EXIT_PENDING 10 +#define STATUS_STATISTICS 12 +#define STATUS_SCANNING 13 +#define STATUS_SCAN_ABORTING 14 +#define STATUS_SCAN_HW 15 +#define STATUS_POWER_PMI 16 +#define STATUS_FW_ERROR 17 + + +static inline int iwl_legacy_is_ready(struct iwl_priv *priv) +{ + /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are + * set but EXIT_PENDING is not */ + return test_bit(STATUS_READY, &priv->status) && + test_bit(STATUS_GEO_CONFIGURED, &priv->status) && + !test_bit(STATUS_EXIT_PENDING, &priv->status); +} + +static inline int iwl_legacy_is_alive(struct iwl_priv *priv) +{ + return test_bit(STATUS_ALIVE, &priv->status); +} + +static inline int iwl_legacy_is_init(struct iwl_priv *priv) +{ + return test_bit(STATUS_INIT, &priv->status); +} + +static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv) +{ + return test_bit(STATUS_RF_KILL_HW, &priv->status); +} + +static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv) +{ + return iwl_legacy_is_rfkill_hw(priv); +} + +static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv) +{ + return test_bit(STATUS_CT_KILL, &priv->status); +} + +static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv) +{ + + if (iwl_legacy_is_rfkill(priv)) + return 0; + + return iwl_legacy_is_ready(priv); +} + +extern void iwl_legacy_send_bt_config(struct iwl_priv *priv); +extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv, + u8 flags, bool clear); +void iwl_legacy_apm_stop(struct iwl_priv *priv); +int iwl_legacy_apm_init(struct iwl_priv *priv); + +int iwl_legacy_send_rxon_timing(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx); +} +static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + return priv->cfg->ops->hcmd->commit_rxon(priv, ctx); +} +static inline const struct ieee80211_supported_band *iwl_get_hw_mode( + struct iwl_priv *priv, enum ieee80211_band band) +{ + return priv->hw->wiphy->bands[band]; +} + +/* mac80211 handlers */ +int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed); +void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw); +void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changes); +void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags); + +irqreturn_t iwl_legacy_isr(int irq, void *data); + +#endif /* __iwl_legacy_core_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/iwl-csr.h new file mode 100644 index 000000000000..668a9616c269 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-csr.h @@ -0,0 +1,422 @@ 
+/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_legacy_csr_h__ +#define __iwl_legacy_csr_h__ +/* + * CSR (control and status registers) + * + * CSR registers are mapped directly into PCI bus space, and are accessible + * whenever platform supplies power to device, even when device is in + * low power states due to driver-invoked device resets + * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes. + * + * Use iwl_write32() and iwl_read32() family to access these registers; + * these provide simple PCI bus access, without waking up the MAC. 
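+ * For example (a sketch of the pattern the driver ISR uses to poll and
+ * mask interrupts; plain 32-bit PCI reads/writes, no MAC wakeup needed):
+ *
+ *	inta = iwl_read32(priv, CSR_INT);
+ *	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
+ *	iwl_write32(priv, CSR_INT_MASK, 0x00000000);
+ *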
+ * Do not use iwl_legacy_write_direct32() family for these registers; + * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ. + * The MAC (uCode processor, etc.) does not need to be powered up for accessing + * the CSR registers. + * + * NOTE: Device does need to be awake in order to read this memory + * via CSR_EEPROM register + */ +#define CSR_BASE (0x000) + +#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ +#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ +#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ +#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ +#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ +#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */ +#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ +#define CSR_GP_CNTRL (CSR_BASE+0x024) + +/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */ +#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005) + +/* + * Hardware revision info + * Bit fields: + * 31-8: Reserved + * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions + * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D + * 1-0: "Dash" (-) value, as in A-1, etc. + * + * NOTE: Revision step affects calculation of CCK txpower for 4965. + * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965). + */ +#define CSR_HW_REV (CSR_BASE+0x028) + +/* + * EEPROM memory reads + * + * NOTE: Device must be awake, initialized via apm_ops.init(), + * in order to read. + */ +#define CSR_EEPROM_REG (CSR_BASE+0x02c) +#define CSR_EEPROM_GP (CSR_BASE+0x030) + +#define CSR_GIO_REG (CSR_BASE+0x03C) +#define CSR_GP_UCODE_REG (CSR_BASE+0x048) +#define CSR_GP_DRIVER_REG (CSR_BASE+0x050) + +/* + * UCODE-DRIVER GP (general purpose) mailbox registers. + * SET/CLR registers set/clear bit(s) if "1" is written. + */ +#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) +#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) +#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) +#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) + +#define CSR_LED_REG (CSR_BASE+0x094) +#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) + +/* GIO Chicken Bits (PCI Express bus link power management) */ +#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) + +/* Analog phase-lock-loop configuration */ +#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) + +/* + * CSR Hardware Revision Workaround Register. Indicates hardware rev; + * "step" determines CCK backoff for txpower calculation. Used for 4965 only. + * See also CSR_HW_REV register. + * Bit fields: + * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step + * 1-0: "Dash" (-) value, as in C-1, etc. 
+ */ +#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) + +#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) +#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250) + +/* Bits for CSR_HW_IF_CONFIG_REG */ +#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) +#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00) +#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100) +#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200) + +#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100) +#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200) +#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400) +#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800) +#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000) +#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000) + +#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) +#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */ +#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */ +#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */ + +#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/ +#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/ + +/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), + * acknowledged (reset) by host writing "1" to flagged bits. */ +#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */ +#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */ +#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */ +#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */ +#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */ +#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */ +#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */ +#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */ +#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */ +#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */ +#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */ + +#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \ + CSR_INT_BIT_HW_ERR | \ + CSR_INT_BIT_FH_TX | \ + CSR_INT_BIT_SW_ERR | \ + CSR_INT_BIT_RF_KILL | \ + CSR_INT_BIT_SW_RX | \ + CSR_INT_BIT_WAKEUP | \ + CSR_INT_BIT_ALIVE) + +/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */ +#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */ +#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */ +#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */ +#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */ +#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */ +#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */ +#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */ +#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */ + +#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR39_FH_INT_BIT_RX_CHNL2 | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + + +#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \ + CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0) + +#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \ + CSR_FH_INT_BIT_RX_CHNL1 | \ + CSR_FH_INT_BIT_RX_CHNL0) + +#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \ + CSR_FH_INT_BIT_TX_CHNL0) + +/* GPIO */ +#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200) +#define 
CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000) +#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC (0x00000200) + +/* RESET */ +#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001) +#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002) +#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) +#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) +#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) +#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000) + +/* + * GP (general purpose) CONTROL REGISTER + * Bit fields: + * 27: HW_RF_KILL_SW + * Indicates state of (platform's) hardware RF-Kill switch + * 26-24: POWER_SAVE_TYPE + * Indicates current power-saving mode: + * 000 -- No power saving + * 001 -- MAC power-down + * 010 -- PHY (radio) power-down + * 011 -- Error + * 9-6: SYS_CONFIG + * Indicates current system configuration, reflecting pins on chip + * as forced high/low by device circuit board. + * 4: GOING_TO_SLEEP + * Indicates MAC is entering a power-saving sleep power-down. + * Not a good time to access device-internal resources. + * 3: MAC_ACCESS_REQ + * Host sets this to request and maintain MAC wakeup, to allow host + * access to device-internal resources. Host must wait for + * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR + * device registers. + * 2: INIT_DONE + * Host sets this to put device into fully operational D0 power mode. + * Host resets this after SW_RESET to put device into low power mode. + * 0: MAC_CLOCK_READY + * Indicates MAC (ucode processor, etc.) is powered up and can run. + * Internal resources are accessible. + * NOTE: This does not indicate that the processor is actually running. + * NOTE: This does not indicate that 4965 or 3945 has completed + * init or post-power-down restore of internal SRAM memory. + * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that + * SRAM is restored and uCode is in normal operation mode. + * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and + * do not need to save/restore it. + * NOTE: After device reset, this bit remains "0" until host sets + * INIT_DONE + */ +#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) +#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) +#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) +#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010) + +#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001) + +#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000) +#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000) +#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000) + + +/* EEPROM REG */ +#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) +#define CSR_EEPROM_REG_BIT_CMD (0x00000002) +#define CSR_EEPROM_REG_MSK_ADDR (0x0000FFFC) +#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) + +/* EEPROM GP */ +#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */ +#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) +#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002) +#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004) + +/* GP REG */ +#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */ +#define CSR_GP_REG_NO_POWER_SAVE (0x00000000) +#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000) +#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000) +#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000) + + +/* CSR GIO */ +#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) + +/* + * UCODE-DRIVER GP (general purpose) mailbox register 1 + * Host driver and uCode write and/or read this register to communicate with + * each other. 
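+ * (The _SET/_CLR aliases above let individual bits be flipped without a
+ * read-modify-write of the whole register.)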
+ * Bit fields: + * 4: UCODE_DISABLE + * Host sets this to request permanent halt of uCode, same as + * sending CARD_STATE command with "halt" bit set. + * 3: CT_KILL_EXIT + * Host sets this to request exit from CT_KILL state, i.e. host thinks + * device temperature is low enough to continue normal operation. + * 2: CMD_BLOCKED + * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL) + * to release uCode to clear all Tx and command queues, enter + * unassociated mode, and power down. + * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit. + * 1: SW_BIT_RFKILL + * Host sets this when issuing CARD_STATE command to request + * device sleep. + * 0: MAC_SLEEP + * uCode sets this when preparing a power-saving power-down. + * uCode resets this when power-up is complete and SRAM is sane. + * NOTE: 3945/4965 saves internal SRAM data to host when powering down, + * and must restore this data after powering back up. + * MAC_SLEEP is the best indication that restore is complete. + * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and + * do not need to save/restore it. + */ +#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) +#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) +#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) +#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008) + +/* GIO Chicken Bits (PCI Express bus link power management) */ +#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) +#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) + +/* LED */ +#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF) +#define CSR_LED_REG_TRUN_ON (0x78) +#define CSR_LED_REG_TRUN_OFF (0x38) + +/* ANA_PLL */ +#define CSR39_ANA_PLL_CFG_VAL (0x01000000) + +/* HPET MEM debug */ +#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000) + +/* DRAM INT TABLE */ +#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) +#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) + +/* + * HBUS (Host-side Bus) + * + * HBUS registers are mapped directly into PCI bus space, but are used + * to indirectly access device's internal memory or registers that + * may be powered-down. + * + * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family + * for these registers; + * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ + * to make sure the MAC (uCode processor, etc.) is powered up for accessing + * internal resources. + * + * Do not use iwl_write32()/iwl_read32() family to access these registers; + * these provide only simple PCI bus access, without waking up the MAC. + */ +#define HBUS_BASE (0x400) + +/* + * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM + * structures, error log, event log, verifying uCode load). + * First write to address register, then read from or write to data register + * to complete the job. Once the address register is set up, accesses to + * data registers auto-increment the address by one dword. + * Bit usage for address registers (read or write): + * 0-31: memory address within device + */ +#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c) +#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010) +#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) +#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) + +/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */ +#define HBUS_TARG_MBX_C (HBUS_BASE+0x030) +#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004) + +/* + * Registers for accessing device's internal peripheral registers + * (e.g. SCD, BSM, etc.). 
First write to address register, + * then read from or write to data register to complete the job. + * Bit usage for address registers (read or write): + * 0-15: register address (offset) within device + * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword) + */ +#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044) +#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048) +#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) +#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) + +/* + * Per-Tx-queue write pointer (index, really!) + * Indicates index to next TFD that driver will fill (1 past latest filled). + * Bit usage: + * 0-7: queue write index + * 11-8: queue selector + */ +#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) + +#endif /* !__iwl_legacy_csr_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h new file mode 100644 index 000000000000..ae13112701bf --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-debug.h @@ -0,0 +1,198 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_debug_h__ +#define __iwl_legacy_debug_h__ + +struct iwl_priv; +extern u32 iwlegacy_debug_level; + +#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a) +#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a) +#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a) +#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a) + +#define iwl_print_hex_error(priv, p, len) \ +do { \ + print_hex_dump(KERN_ERR, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define IWL_DEBUG(__priv, level, fmt, args...) \ +do { \ + if (iwl_legacy_get_debug_level(__priv) & (level)) \ + dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ + "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \ +do { \ + if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \ + dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ + "%c %s " fmt, in_interrupt() ? 
'I' : 'U', \ + __func__ , ## args); \ +} while (0) + +#define iwl_print_hex_dump(priv, level, p, len) \ +do { \ + if (iwl_legacy_get_debug_level(priv) & level) \ + print_hex_dump(KERN_DEBUG, "iwl data: ", \ + DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ +} while (0) + +#else +#define IWL_DEBUG(__priv, level, fmt, args...) +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) +static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, + const void *p, u32 len) +{} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS +int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name); +void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv); +#else +static inline int +iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + return 0; +} +static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv) +{ +} +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */ + +/* + * To use the debug system: + * + * If you are defining a new debug classification, simply add it to the #define + * list here in the form of + * + * #define IWL_DL_xxxx VALUE + * + * where xxxx should be the name of the classification (for example, WEP). + * + * You then need to either add a IWL_xxxx_DEBUG() macro definition for your + * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want + * to send output to that classification. + * + * The active debug levels can be accessed via files + * + * /sys/module/iwl4965/parameters/debug{50} + * /sys/module/iwl3945/parameters/debug + * /sys/class/net/wlan0/device/debug_level + * + * when CONFIG_IWLWIFI_LEGACY_DEBUG=y. + */ + +/* 0x0000000F - 0x00000001 */ +#define IWL_DL_INFO (1 << 0) +#define IWL_DL_MAC80211 (1 << 1) +#define IWL_DL_HCMD (1 << 2) +#define IWL_DL_STATE (1 << 3) +/* 0x000000F0 - 0x00000010 */ +#define IWL_DL_MACDUMP (1 << 4) +#define IWL_DL_HCMD_DUMP (1 << 5) +#define IWL_DL_EEPROM (1 << 6) +#define IWL_DL_RADIO (1 << 7) +/* 0x00000F00 - 0x00000100 */ +#define IWL_DL_POWER (1 << 8) +#define IWL_DL_TEMP (1 << 9) +#define IWL_DL_NOTIF (1 << 10) +#define IWL_DL_SCAN (1 << 11) +/* 0x0000F000 - 0x00001000 */ +#define IWL_DL_ASSOC (1 << 12) +#define IWL_DL_DROP (1 << 13) +#define IWL_DL_TXPOWER (1 << 14) +#define IWL_DL_AP (1 << 15) +/* 0x000F0000 - 0x00010000 */ +#define IWL_DL_FW (1 << 16) +#define IWL_DL_RF_KILL (1 << 17) +#define IWL_DL_FW_ERRORS (1 << 18) +#define IWL_DL_LED (1 << 19) +/* 0x00F00000 - 0x00100000 */ +#define IWL_DL_RATE (1 << 20) +#define IWL_DL_CALIB (1 << 21) +#define IWL_DL_WEP (1 << 22) +#define IWL_DL_TX (1 << 23) +/* 0x0F000000 - 0x01000000 */ +#define IWL_DL_RX (1 << 24) +#define IWL_DL_ISR (1 << 25) +#define IWL_DL_HT (1 << 26) +#define IWL_DL_IO (1 << 27) +/* 0xF0000000 - 0x10000000 */ +#define IWL_DL_11H (1 << 28) +#define IWL_DL_STATS (1 << 29) +#define IWL_DL_TX_REPLY (1 << 30) +#define IWL_DL_QOS (1 << 31) + +#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a) +#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) +#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) +#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) +#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a) +#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a) +#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a) +#define IWL_DEBUG_WEP(p, f, a...) 
IWL_DEBUG(p, IWL_DL_WEP, f, ## a) +#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) +#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a) +#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a) +#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) +#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) +#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) +#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) +#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a) +#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a) +#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a) +#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a) +#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a) +#define IWL_DEBUG_ASSOC(p, f, a...) \ + IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) +#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a) +#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a) +#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \ + IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a) +#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a) +#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a) +#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) +#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) + +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c new file mode 100644 index 000000000000..2d32438b4cb8 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-debugfs.c @@ -0,0 +1,1467 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include <linux/ieee80211.h> +#include <net/mac80211.h> + + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-core.h" +#include "iwl-io.h" + +/* create and remove of files */ +#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ + if (!debugfs_create_file(#name, mode, parent, priv, \ + &iwl_legacy_dbgfs_##name##_ops)) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +#define DEBUGFS_ADD_X32(name, parent, ptr) do { \ + struct dentry *__tmp; \ + __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \ + parent, ptr); \ + if (IS_ERR(__tmp) || !__tmp) \ + goto err; \ +} while (0) + +/* file operation */ +#define DEBUGFS_READ_FUNC(name) \ +static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos); + +#define DEBUGFS_WRITE_FUNC(name) \ +static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos); + + +static int +iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +#define DEBUGFS_READ_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + .read = iwl_legacy_dbgfs_##name##_read, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_WRITE_FILE_OPS(name) \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + .write = iwl_legacy_dbgfs_##name##_write, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +#define DEBUGFS_READ_WRITE_FILE_OPS(name) \ + DEBUGFS_READ_FUNC(name); \ + DEBUGFS_WRITE_FUNC(name); \ +static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \ + .write = iwl_legacy_dbgfs_##name##_write, \ + .read = iwl_legacy_dbgfs_##name##_read, \ + .open = iwl_legacy_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ +}; + +static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + + int cnt; + ssize_t ret; + const size_t bufsz = 100 + + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_mgmt_string(cnt), + priv->tx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_ctrl_string(cnt), + priv->tx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + priv->tx_stats.data_cnt); + pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + priv->tx_stats.data_bytes); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t 
+iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 clear_flag; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &clear_flag) != 1) + return -EFAULT; + iwl_legacy_clear_traffic_stats(priv); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + int cnt; + ssize_t ret; + const size_t bufsz = 100 + + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); + for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_mgmt_string(cnt), + priv->rx_stats.mgmt[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); + for (cnt = 0; cnt < CONTROL_MAX; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\t%25s\t\t: %u\n", + iwl_legacy_get_ctrl_string(cnt), + priv->rx_stats.ctrl[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "Data:\n"); + pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n", + priv->rx_stats.data_cnt); + pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n", + priv->rx_stats.data_bytes); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +#define BYTE1_MASK 0x000000ff; +#define BYTE2_MASK 0x0000ffff; +#define BYTE3_MASK 0x00ffffff; +static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + u32 val; + char *buf; + ssize_t ret; + int i; + int pos = 0; + struct iwl_priv *priv = file->private_data; + size_t bufsz; + + /* default is to dump the entire data segment */ + if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { + priv->dbgfs_sram_offset = 0x800000; + if (priv->ucode_type == UCODE_INIT) + priv->dbgfs_sram_len = priv->ucode_init_data.len; + else + priv->dbgfs_sram_len = priv->ucode_data.len; + } + bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", + priv->dbgfs_sram_len); + pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", + priv->dbgfs_sram_offset); + for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { + val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \ + priv->dbgfs_sram_len - i); + if (i < 4) { + switch (i) { + case 1: + val &= BYTE1_MASK; + break; + case 2: + val &= BYTE2_MASK; + break; + case 3: + val &= BYTE3_MASK; + break; + } + } + if (!(i % 16)) + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[64]; + int buf_size; + u32 offset, len; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, 
buf_size)) + return -EFAULT; + + if (sscanf(buf, "%x,%x", &offset, &len) == 2) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = len; + } else { + priv->dbgfs_sram_offset = 0; + priv->dbgfs_sram_len = 0; + } + + return count; +} + +static ssize_t +iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_station_entry *station; + int max_sta = priv->hw_params.max_stations; + char *buf; + int i, j, pos = 0; + ssize_t ret; + /* Add 30 for initial string */ + const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations); + + buf = kmalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n", + priv->num_stations); + + for (i = 0; i < max_sta; i++) { + station = &priv->stations[i]; + if (!station->used) + continue; + pos += scnprintf(buf + pos, bufsz - pos, + "station %d - addr: %pM, flags: %#x\n", + i, station->sta.sta.addr, + station->sta.station_flags_msk); + pos += scnprintf(buf + pos, bufsz - pos, + "TID\tseq_num\ttxq_id\tframes\ttfds\t"); + pos += scnprintf(buf + pos, bufsz - pos, + "start_idx\tbitmap\t\t\trate_n_flags\n"); + + for (j = 0; j < MAX_TID_COUNT; j++) { + pos += scnprintf(buf + pos, bufsz - pos, + "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x", + j, station->tid[j].seq_number, + station->tid[j].agg.txq_id, + station->tid[j].agg.frame_count, + station->tid[j].tfds_in_queue, + station->tid[j].agg.start_idx, + station->tid[j].agg.bitmap, + station->tid[j].agg.rate_n_flags); + + if (station->tid[j].agg.wait_for_ba) + pos += scnprintf(buf + pos, bufsz - pos, + " - waitforba"); + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file, + char __user *user_buf, + size_t count, + loff_t *ppos) +{ + ssize_t ret; + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0, buf_size = 0; + const u8 *ptr; + char *buf; + u16 eeprom_ver; + size_t eeprom_len = priv->cfg->base_params->eeprom_size; + buf_size = 4 * eeprom_len + 256; + + if (eeprom_len % 16) { + IWL_ERR(priv, "NVM size is not multiple of 16.\n"); + return -ENODATA; + } + + ptr = priv->eeprom; + if (!ptr) { + IWL_ERR(priv, "Invalid EEPROM memory\n"); + return -ENOMEM; + } + + /* 4 characters for byte 0xYY */ + buf = kzalloc(buf_size, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION); + pos += scnprintf(buf + pos, buf_size - pos, "EEPROM " + "version: 0x%x\n", eeprom_ver); + for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { + pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, + buf_size - pos, 0); + pos += strlen(buf + pos); + if (buf_size - pos > 0) + buf[pos++] = '\n'; + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_log_event_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -ENOMEM; + + ret = pos = priv->cfg->ops->lib->dump_nic_event_log( + priv, true, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + 
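/* the log buffer was allocated for us by dump_nic_event_log(); free it once its contents have been copied to user space */ +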
kfree(buf); + } + return ret; +} + +static ssize_t iwl_legacy_dbgfs_log_event_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + u32 event_log_flag; + char buf[8]; + int buf_size; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &event_log_flag) != 1) + return -EFAULT; + if (event_log_flag == 1) + priv->cfg->ops->lib->dump_nic_event_log(priv, true, + NULL, false); + + return count; +} + + + +static ssize_t +iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct ieee80211_channel *channels = NULL; + const struct ieee80211_supported_band *supp_band = NULL; + int pos = 0, i, bufsz = PAGE_SIZE; + char *buf; + ssize_t ret; + + if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status)) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 2.4GHz band 802.11bg):\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? + "passive only" : "active/passive"); + } + supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ); + if (supp_band) { + channels = supp_band->channels; + + pos += scnprintf(buf + pos, bufsz - pos, + "Displaying %d channels in 5.2GHz band (802.11a)\n", + supp_band->n_channels); + + for (i = 0; i < supp_band->n_channels; i++) + pos += scnprintf(buf + pos, bufsz - pos, + "%d: %ddBm: BSS%s%s, %s.\n", + channels[i].hw_value, + channels[i].max_power, + channels[i].flags & IEEE80211_CHAN_RADAR ? + " (IEEE 802.11h required)" : "", + ((channels[i].flags & IEEE80211_CHAN_NO_IBSS) + || (channels[i].flags & + IEEE80211_CHAN_RADAR)) ? "" : + ", IBSS", + channels[i].flags & + IEEE80211_CHAN_PASSIVE_SCAN ? 
+ "passive only" : "active/passive"); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[512]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", + test_bit(STATUS_HCMD_ACTIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", + test_bit(STATUS_INT_ENABLED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", + test_bit(STATUS_RF_KILL_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", + test_bit(STATUS_CT_KILL, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", + test_bit(STATUS_INIT, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", + test_bit(STATUS_ALIVE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", + test_bit(STATUS_READY, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n", + test_bit(STATUS_TEMPERATURE, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n", + test_bit(STATUS_GEO_CONFIGURED, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", + test_bit(STATUS_EXIT_PENDING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", + test_bit(STATUS_STATISTICS, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", + test_bit(STATUS_SCANNING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", + test_bit(STATUS_SCAN_ABORTING, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", + test_bit(STATUS_SCAN_HW, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", + test_bit(STATUS_POWER_PMI, &priv->status)); + pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", + test_bit(STATUS_FW_ERROR, &priv->status)); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = 24 * 64; /* 24 items * 64 char per item */ + ssize_t ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, + "Interrupt Statistics Report:\n"); + + pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", + priv->isr_stats.hw); + pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", + priv->isr_stats.sw); + if (priv->isr_stats.sw || priv->isr_stats.hw) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tLast Restarting Code: 0x%X\n", + priv->isr_stats.err_code); + } +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", + priv->isr_stats.sch); + pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", + priv->isr_stats.alive); +#endif + pos += scnprintf(buf + pos, bufsz - pos, + "HW RF KILL switch toggled:\t %u\n", + priv->isr_stats.rfkill); + + pos += scnprintf(buf + pos, 
bufsz - pos, "CT KILL:\t\t\t %u\n", + priv->isr_stats.ctkill); + + pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", + priv->isr_stats.wakeup); + + pos += scnprintf(buf + pos, bufsz - pos, + "Rx command responses:\t\t %u\n", + priv->isr_stats.rx); + for (cnt = 0; cnt < REPLY_MAX; cnt++) { + if (priv->isr_stats.rx_handlers[cnt] > 0) + pos += scnprintf(buf + pos, bufsz - pos, + "\tRx handler[%36s]:\t\t %u\n", + iwl_legacy_get_cmd_string(cnt), + priv->isr_stats.rx_handlers[cnt]); + } + + pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", + priv->isr_stats.tx); + + pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", + priv->isr_stats.unhandled); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + u32 reset_flag; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%x", &reset_flag) != 1) + return -EFAULT; + if (reset_flag == 0) + iwl_legacy_clear_isr_stats(priv); + + return count; +} + +static ssize_t +iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + struct iwl_rxon_context *ctx; + int pos = 0, i; + char buf[256 * NUM_IWL_RXON_CTX]; + const size_t bufsz = sizeof(buf); + + for_each_context(priv, ctx) { + pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", + ctx->ctxid); + for (i = 0; i < AC_NUM; i++) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tcw_min\tcw_max\taifsn\ttxop\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "AC[%d]\t%u\t%u\t%u\t%u\n", i, + ctx->qos_data.def_qos_parm.ac[i].cw_min, + ctx->qos_data.def_qos_parm.ac[i].cw_max, + ctx->qos_data.def_qos_parm.ac[i].aifsn, + ctx->qos_data.def_qos_parm.ac[i].edca_txop); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int ht40; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &ht40) != 1) + return -EFAULT; + if (!iwl_legacy_is_any_associated(priv)) + priv->disable_ht40 = ht40 ? true : false; + else { + IWL_ERR(priv, "Sta associated with AP - " + "Change to 40MHz channel support is not allowed\n"); + return -EINVAL; + } + + return count; +} + +static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[100]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, + "11n 40MHz Mode: %s\n", + priv->disable_ht40 ? 
"Disabled" : "Enabled"); + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +DEBUGFS_READ_WRITE_FILE_OPS(sram); +DEBUGFS_READ_WRITE_FILE_OPS(log_event); +DEBUGFS_READ_FILE_OPS(nvm); +DEBUGFS_READ_FILE_OPS(stations); +DEBUGFS_READ_FILE_OPS(channels); +DEBUGFS_READ_FILE_OPS(status); +DEBUGFS_READ_WRITE_FILE_OPS(interrupt); +DEBUGFS_READ_FILE_OPS(qos); +DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); + +static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + int pos = 0, ofs = 0; + int cnt = 0, entry; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + struct iwl_rx_queue *rxq = &priv->rxq; + char *buf; + int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + + (priv->cfg->base_params->num_of_queues * 32 * 8) + 400; + const u8 *ptr; + ssize_t ret; + + if (!priv->txq) { + IWL_ERR(priv, "txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate buffer\n"); + return -ENOMEM; + } + pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n"); + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + txq = &priv->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "q[%d]: read_ptr: %u, write_ptr: %u\n", + cnt, q->read_ptr, q->write_ptr); + } + if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) { + ptr = priv->tx_traffic; + pos += scnprintf(buf + pos, bufsz - pos, + "Tx Traffic idx: %u\n", priv->tx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n"); + pos += scnprintf(buf + pos, bufsz - pos, + "read: %u, write: %u\n", + rxq->read, rxq->write); + + if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) { + ptr = priv->rx_traffic; + pos += scnprintf(buf + pos, bufsz - pos, + "Rx Traffic idx: %u\n", priv->rx_traffic_idx); + for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) { + for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16; + entry++, ofs += 16) { + pos += scnprintf(buf + pos, bufsz - pos, + "0x%.4x ", ofs); + hex_dump_to_buffer(ptr + ofs, 16, 16, 2, + buf + pos, bufsz - pos, 0); + pos += strlen(buf + pos); + if (bufsz - pos > 0) + buf[pos++] = '\n'; + } + } + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int traffic_log; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &traffic_log) != 1) + return -EFAULT; + if (traffic_log == 0) + iwl_legacy_reset_traffic_log(priv); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + char *buf; + int pos = 0; + int cnt; + int ret; + const size_t bufsz = sizeof(char) * 
64 * + priv->cfg->base_params->num_of_queues; + + if (!priv->txq) { + IWL_ERR(priv, "txq not ready\n"); + return -EAGAIN; + } + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { + txq = &priv->txq[cnt]; + q = &txq->q; + pos += scnprintf(buf + pos, bufsz - pos, + "hwq %.2d: read=%u write=%u stop=%d" + " swq_id=%#.2x (ac %d/hwq %d)\n", + cnt, q->read_ptr, q->write_ptr, + !!test_bit(cnt, priv->queue_stopped), + txq->swq_id, txq->swq_id & 3, + (txq->swq_id >> 2) & 0x1f); + if (cnt >= 4) + continue; + /* for the ACs, display the stop count too */ + pos += scnprintf(buf + pos, bufsz - pos, + " stop-count: %d\n", + atomic_read(&priv->queue_stop_count[cnt])); + } + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + struct iwl_rx_queue *rxq = &priv->rxq; + char buf[256]; + int pos = 0; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "closed_rb_num: Not Allocated\n"); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file, + user_buf, count, ppos); +} + +static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100; + ssize_t ret; + struct iwl_sensitivity_data *data; + + data = &priv->sensitivity_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", + data->auto_corr_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc:\t\t %u\n", + data->auto_corr_ofdm_mrc); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n", + data->auto_corr_ofdm_x1); + pos += scnprintf(buf + pos, bufsz - pos, + "auto_corr_ofdm_mrc_x1:\t\t %u\n", + data->auto_corr_ofdm_mrc_x1); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n", + data->auto_corr_cck); + pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n", + 
data->auto_corr_cck_mrc); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_ofdm:\t\t %u\n", + data->last_bad_plcp_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n", + data->last_fa_cnt_ofdm); + pos += scnprintf(buf + pos, bufsz - pos, + "last_bad_plcp_cnt_cck:\t\t %u\n", + data->last_bad_plcp_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n", + data->last_fa_cnt_cck); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n", + data->nrg_curr_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n", + data->nrg_prev_state); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t"); + for (cnt = 0; cnt < 10; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_value[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t"); + for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->nrg_silence_rssi[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n", + data->nrg_silence_ref); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n", + data->nrg_energy_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n", + data->nrg_silence_idx); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n", + data->nrg_th_cck); + pos += scnprintf(buf + pos, bufsz - pos, + "nrg_auto_corr_silence_diff:\t %u\n", + data->nrg_auto_corr_silence_diff); + pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n", + data->num_in_cck_no_fa); + pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n", + data->nrg_th_ofdm); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + + +static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + int cnt = 0; + char *buf; + int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100; + ssize_t ret; + struct iwl_chain_noise_data *data; + + data = &priv->chain_noise_data; + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) { + IWL_ERR(priv, "Can not allocate Buffer\n"); + return -ENOMEM; + } + + pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", + data->active_chains); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n", + data->chain_noise_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n", + data->chain_noise_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n", + data->chain_noise_c); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n", + data->chain_signal_a); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n", + data->chain_signal_b); + pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n", + data->chain_signal_c); + pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n", + data->beacon_count); + + pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->disconn_array[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t"); + for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) { + 
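/* one delta gain code per rx chain (A, B and C) */ +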
pos += scnprintf(buf + pos, bufsz - pos, " %u", + data->delta_gain_code[cnt]); + } + pos += scnprintf(buf + pos, bufsz - pos, "\n"); + pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n", + data->radio_write); + pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n", + data->state); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + return ret; +} + +static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[60]; + int pos = 0; + const size_t bufsz = sizeof(buf); + u32 pwrsave_status; + + pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_REG_POWER_SAVE_STATUS_MSK; + + pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); + pos += scnprintf(buf + pos, bufsz - pos, "%s\n", + (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" : + (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" : + (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" : + "error"); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int clear; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &clear) != 1) + return -EFAULT; + + /* make request to uCode to retrieve statistics information */ + mutex_lock(&priv->mutex); + iwl_legacy_send_statistics_request(priv, CMD_SYNC, true); + mutex_unlock(&priv->mutex); + + return count; +} + +static ssize_t iwl_legacy_dbgfs_ucode_tracing_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[128]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n", + priv->event_log.ucode_trace ? 
"On" : "Off"); + pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n", + priv->event_log.non_wraps_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n", + priv->event_log.wraps_once_count); + pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n", + priv->event_log.wraps_more_count); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_ucode_tracing_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int trace; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &trace) != 1) + return -EFAULT; + + if (trace) { + priv->event_log.ucode_trace = true; + /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */ + mod_timer(&priv->ucode_trace, + jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); + } else { + priv->event_log.ucode_trace = false; + del_timer_sync(&priv->ucode_trace); + } + + return count; +} + +static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int len = 0; + char buf[20]; + + len = sprintf(buf, "0x%04X\n", + le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags)); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char *buf; + int pos = 0; + ssize_t ret = -EFAULT; + + if (priv->cfg->ops->lib->dump_fh) { + ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true); + if (buf) { + ret = simple_read_from_buffer(user_buf, + count, ppos, buf, pos); + kfree(buf); + } + } + + return ret; +} + +static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%d\n", + priv->missed_beacon_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int missed; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &missed) != 1) + return -EINVAL; + + if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN || + missed > IWL_MISSED_BEACON_THRESHOLD_MAX) + priv->missed_beacon_threshold = + IWL_MISSED_BEACON_THRESHOLD_DEF; + else + priv->missed_beacon_threshold = missed; + + return count; +} + +static ssize_t iwl_legacy_dbgfs_plcp_delta_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { 
+ + struct iwl_priv *priv = file->private_data; + int pos = 0; + char buf[12]; + const size_t bufsz = sizeof(buf); + + pos += scnprintf(buf + pos, bufsz - pos, "%u\n", + priv->cfg->base_params->plcp_delta_threshold); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_plcp_delta_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int plcp; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &plcp) != 1) + return -EINVAL; + if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) || + (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX)) + priv->cfg->base_params->plcp_delta_threshold = + IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE; + else + priv->cfg->base_params->plcp_delta_threshold = plcp; + return count; +} + +static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + int i, pos = 0; + char buf[300]; + const size_t bufsz = sizeof(buf); + struct iwl_force_reset *force_reset; + + for (i = 0; i < IWL_MAX_FORCE_RESET; i++) { + force_reset = &priv->force_reset[i]; + pos += scnprintf(buf + pos, bufsz - pos, + "Force reset method %d\n", i); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request: %d\n", + force_reset->reset_request_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request success: %d\n", + force_reset->reset_success_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\tnumber of reset request reject: %d\n", + force_reset->reset_reject_count); + pos += scnprintf(buf + pos, bufsz - pos, + "\treset duration: %lu\n", + force_reset->reset_duration); + } + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int reset, ret; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &reset) != 1) + return -EINVAL; + switch (reset) { + case IWL_RF_RESET: + case IWL_FW_RESET: + ret = iwl_legacy_force_reset(priv, reset, true); + break; + default: + return -EINVAL; + } + return ret ? 
ret : count; +} + +static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) { + + struct iwl_priv *priv = file->private_data; + char buf[8]; + int buf_size; + int timeout; + + memset(buf, 0, sizeof(buf)); + buf_size = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + if (sscanf(buf, "%d", &timeout) != 1) + return -EINVAL; + if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT) + timeout = IWL_DEF_WD_TIMEOUT; + + priv->cfg->base_params->wd_timeout = timeout; + iwl_legacy_setup_watchdog(priv); + return count; +} + +DEBUGFS_READ_FILE_OPS(rx_statistics); +DEBUGFS_READ_FILE_OPS(tx_statistics); +DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); +DEBUGFS_READ_FILE_OPS(rx_queue); +DEBUGFS_READ_FILE_OPS(tx_queue); +DEBUGFS_READ_FILE_OPS(ucode_rx_stats); +DEBUGFS_READ_FILE_OPS(ucode_tx_stats); +DEBUGFS_READ_FILE_OPS(ucode_general_stats); +DEBUGFS_READ_FILE_OPS(sensitivity); +DEBUGFS_READ_FILE_OPS(chain_noise); +DEBUGFS_READ_FILE_OPS(power_save_status); +DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); +DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); +DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); +DEBUGFS_READ_FILE_OPS(fh_reg); +DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); +DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); +DEBUGFS_READ_WRITE_FILE_OPS(force_reset); +DEBUGFS_READ_FILE_OPS(rxon_flags); +DEBUGFS_READ_FILE_OPS(rxon_filter_flags); +DEBUGFS_WRITE_FILE_OPS(wd_timeout); + +/* + * Create the debugfs files and directories + * + */ +int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name) +{ + struct dentry *phyd = priv->hw->wiphy->debugfsdir; + struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug; + + dir_drv = debugfs_create_dir(name, phyd); + if (!dir_drv) + return -ENOMEM; + + priv->debugfs_dir = dir_drv; + + dir_data = debugfs_create_dir("data", dir_drv); + if (!dir_data) + goto err; + dir_rf = debugfs_create_dir("rf", dir_drv); + if (!dir_rf) + goto err; + dir_debug = debugfs_create_dir("debug", dir_drv); + if (!dir_debug) + goto err; + + DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); + DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); + DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); + + if (priv->cfg->base_params->sensitivity_calib_by_driver) + DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); + if 
(priv->cfg->base_params->chain_noise_calib_by_driver) + DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); + if (priv->cfg->base_params->ucode_tracing) + DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); + DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); + DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); + if (priv->cfg->base_params->sensitivity_calib_by_driver) + DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, + &priv->disable_sens_cal); + if (priv->cfg->base_params->chain_noise_calib_by_driver) + DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, + &priv->disable_chain_noise_cal); + DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, + &priv->disable_tx_power_cal); + return 0; + +err: + IWL_ERR(priv, "Can't create the debugfs directory\n"); + iwl_legacy_dbgfs_unregister(priv); + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_dbgfs_register); + +/** + * Remove the debugfs files and directories + * + */ +void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv) +{ + if (!priv->debugfs_dir) + return; + + debugfs_remove_recursive(priv->debugfs_dir); + priv->debugfs_dir = NULL; +} +EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister); diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h new file mode 100644 index 000000000000..9ee849d669f3 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-dev.h @@ -0,0 +1,1426 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +/* + * Please use this file (iwl-dev.h) for driver implementation definitions. + * Please use iwl-commands.h for uCode API definitions. + * Please use iwl-4965-hw.h for hardware-related definitions. + */ + +#ifndef __iwl_legacy_dev_h__ +#define __iwl_legacy_dev_h__ + +#include <linux/pci.h> /* for struct pci_device_id */ +#include <linux/kernel.h> +#include <linux/leds.h> +#include <linux/wait.h> +#include <net/ieee80211_radiotap.h> + +#include "iwl-eeprom.h" +#include "iwl-csr.h" +#include "iwl-prph.h" +#include "iwl-fh.h" +#include "iwl-debug.h" +#include "iwl-4965-hw.h" +#include "iwl-3945-hw.h" +#include "iwl-led.h" +#include "iwl-power.h" +#include "iwl-legacy-rs.h" + +struct iwl_tx_queue; + +/* CT-KILL constants */ +#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ + +/* Default noise level to report when noise measurement is not available. 
+ * This may be because we're: + * 1) Not associated (4965, no beacon statistics being sent to driver) + * 2) Scanning (noise measurement does not apply to associated channel) + * 3) Receiving CCK (3945 delivers noise info only for OFDM frames) + * Use default noise value of -127 ... this is below the range of measurable + * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user. + * Also, -127 works better than 0 when averaging frames with/without + * noise info (e.g. averaging might be done in app); measured dBm values are + * always negative ... using a negative value as the default keeps all + * averages within an s8's (used in some apps) range of negative values. */ +#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) + +/* + * RTS threshold here is total size [2347] minus 4 FCS bytes + * Per spec: + * a value of 0 means RTS on all data/management packets + * a value > max MSDU size means no RTS + * else RTS for data/management frames where MPDU is larger + * than RTS value. + */ +#define DEFAULT_RTS_THRESHOLD 2347U +#define MIN_RTS_THRESHOLD 0U +#define MAX_RTS_THRESHOLD 2347U +#define MAX_MSDU_SIZE 2304U +#define MAX_MPDU_SIZE 2346U +#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_SHORT_RETRY_LIMIT 7U +#define DEFAULT_LONG_RETRY_LIMIT 4U + +struct iwl_rx_mem_buffer { + dma_addr_t page_dma; + struct page *page; + struct list_head list; +}; + +#define rxb_addr(r) page_address(r->page) + +/* defined below */ +struct iwl_device_cmd; + +struct iwl_cmd_meta { + /* only for SYNC commands, iff the reply skb is wanted */ + struct iwl_host_cmd *source; + /* + * only for ASYNC commands + * (which is somewhat stupid -- look at iwl-sta.c for instance + * which duplicates a bunch of code because the callback isn't + * invoked for SYNC commands, if it were and its result passed + * through it would be simpler...) + */ + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt); + + /* The CMD_SIZE_HUGE flag bit indicates that the command + * structure is stored at the end of the shared queue memory. */ + u32 flags; + + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(len); +}; + +/* + * Generic queue structure + * + * Contains common data for Rx and Tx queues + */ +struct iwl_queue { + int n_bd; /* number of BDs in this queue */ + int write_ptr; /* 1-st empty entry (index) host_w*/ + int read_ptr; /* last used entry (index) host_r*/ + /* use for monitoring and recovering the stuck queue */ + dma_addr_t dma_addr; /* physical addr for BD's */ + int n_window; /* safe queue window */ + u32 id; + int low_mark; /* low watermark, resume queue if free + * space more than this */ + int high_mark; /* high watermark, stop queue if free + * space less than this */ +} __packed; + +/* One for each TFD */ +struct iwl_tx_info { + struct sk_buff *skb; + struct iwl_rxon_context *ctx; +}; + +/** + * struct iwl_tx_queue - Tx Queue for DMA + * @q: generic Rx/Tx queue descriptor + * @bd: base of circular buffer of TFDs + * @cmd: array of command/TX buffer pointers + * @meta: array of meta data for each command/tx buffer + * @dma_addr_cmd: physical address of cmd/tx buffer array + * @txb: array of per-TFD driver data + * @time_stamp: time (in jiffies) of last read_ptr change + * @need_update: indicates need to update read/write index + * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled + * + * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame + * descriptors) and required locking structures. 
+ */ +#define TFD_TX_CMD_SLOTS 256 +#define TFD_CMD_SLOTS 32 + +struct iwl_tx_queue { + struct iwl_queue q; + void *tfds; + struct iwl_device_cmd **cmd; + struct iwl_cmd_meta *meta; + struct iwl_tx_info *txb; + unsigned long time_stamp; + u8 need_update; + u8 sched_retry; + u8 active; + u8 swq_id; +}; + +#define IWL_NUM_SCAN_RATES (2) + +struct iwl4965_channel_tgd_info { + u8 type; + s8 max_power; +}; + +struct iwl4965_channel_tgh_info { + s64 last_radar_time; +}; + +#define IWL4965_MAX_RATE (33) + +struct iwl3945_clip_group { + /* maximum power level to prevent clipping for each rate, derived by + * us from this band's saturation power in EEPROM */ + const s8 clip_powers[IWL_MAX_RATES]; +}; + +/* current Tx power values to use, one for each rate for each channel. + * requested power is limited by: + * -- regulatory EEPROM limits for this channel + * -- hardware capabilities (clip-powers) + * -- spectrum management + * -- user preference (e.g. iwconfig) + * when requested power is set, base power index must also be set. */ +struct iwl3945_channel_power_info { + struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 base_power_index; /* gain index for power at factory temp. */ + s8 requested_power; /* power (dBm) requested for this chnl/rate */ +}; + +/* current scan Tx power values to use, one for each scan rate for each + * channel. */ +struct iwl3945_scan_power_info { + struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */ + s8 power_table_index; /* actual (compenst'd) index into gain table */ + s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */ +}; + +/* + * One for each channel, holds all channel setup data + * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant + * with one another! + */ +struct iwl_channel_info { + struct iwl4965_channel_tgd_info tgd; + struct iwl4965_channel_tgh_info tgh; + struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */ + struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for + * HT40 channel */ + + u8 channel; /* channel number */ + u8 flags; /* flags copied from EEPROM */ + s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */ + s8 min_power; /* always 0 */ + s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */ + + u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ + u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ + enum ieee80211_band band; + + /* HT40 channel info */ + s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ + u8 ht40_flags; /* flags copied from EEPROM */ + u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ + + /* Radio/DSP gain settings for each "normal" data Tx rate. + * These include, in addition to RF and DSP gain, a few fields for + * remembering/modifying gain settings (indexes). */ + struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE]; + + /* Radio/DSP gain settings for each scan rate, for directed scans. */ + struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; +}; + +#define IWL_TX_FIFO_BK 0 /* shared */ +#define IWL_TX_FIFO_BE 1 +#define IWL_TX_FIFO_VI 2 /* shared */ +#define IWL_TX_FIFO_VO 3 +#define IWL_TX_FIFO_UNUSED -1 + +/* Minimum number of queues. MAX_NUM is defined in hw specific files. 
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command + * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ +#define IWL_MIN_NUM_QUEUES 10 + +#define IWL_DEFAULT_CMD_QUEUE_NUM 4 + +#define IEEE80211_DATA_LEN 2304 +#define IEEE80211_4ADDR_LEN 30 +#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) +#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN) + +struct iwl_frame { + union { + struct ieee80211_hdr frame; + struct iwl_tx_beacon_cmd beacon; + u8 raw[IEEE80211_FRAME_LEN]; + u8 cmd[360]; + } u; + struct list_head list; +}; + +#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) +#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) +#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) + +enum { + CMD_SYNC = 0, + CMD_SIZE_NORMAL = 0, + CMD_NO_SKB = 0, + CMD_SIZE_HUGE = (1 << 0), + CMD_ASYNC = (1 << 1), + CMD_WANT_SKB = (1 << 2), +}; + +#define DEF_CMD_PAYLOAD_SIZE 320 + +/** + * struct iwl_device_cmd + * + * For allocation of the command and tx queues, this establishes the overall + * size of the largest command we send to uCode, except for a scan command + * (which is relatively huge; space is allocated separately). + */ +struct iwl_device_cmd { + struct iwl_cmd_header hdr; /* uCode API */ + union { + u32 flags; + u8 val8; + u16 val16; + u32 val32; + struct iwl_tx_cmd tx; + u8 payload[DEF_CMD_PAYLOAD_SIZE]; + } __packed cmd; +} __packed; + +#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) + + +struct iwl_host_cmd { + const void *data; + unsigned long reply_page; + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt); + u32 flags; + u16 len; + u8 id; +}; + +#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 +#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 +#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 + +/** + * struct iwl_rx_queue - Rx queue + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @read: Shared index to newest available Rx buffer + * @write: Shared index to oldest written Rx packet + * @free_count: Number of pre-allocated buffers in rx_free + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB + * @need_update: flag to indicate we need to update read/write index + * @rb_stts: driver's pointer to receive buffer status + * @rb_stts_dma: bus address of receive buffer status + * + * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers + */ +struct iwl_rx_queue { + __le32 *bd; + dma_addr_t bd_dma; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; + u32 read; + u32 write; + u32 free_count; + u32 write_actual; + struct list_head rx_free; + struct list_head rx_used; + int need_update; + struct iwl_rb_status *rb_stts; + dma_addr_t rb_stts_dma; + spinlock_t lock; +}; + +#define IWL_SUPPORTED_RATES_IE_LEN 8 + +#define MAX_TID_COUNT 9 + +#define IWL_INVALID_RATE 0xFF +#define IWL_INVALID_VALUE -1 + +/** + * struct iwl_ht_agg -- aggregation status while waiting for block-ack + * @txq_id: Tx queue used for Tx attempt + * @frame_count: # frames attempted by Tx command + * @wait_for_ba: Expect block-ack before next Tx reply + * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window + * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window + * @bitmap1: High order, one bit for each frame pending ACK in Tx window + * @rate_n_flags: Rate at which Tx was attempted + * + * If 
REPLY_TX indicates that aggregation was attempted, driver must wait + * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info + * until block ack arrives. + */ +struct iwl_ht_agg { + u16 txq_id; + u16 frame_count; + u16 wait_for_ba; + u16 start_idx; + u64 bitmap; + u32 rate_n_flags; +#define IWL_AGG_OFF 0 +#define IWL_AGG_ON 1 +#define IWL_EMPTYING_HW_QUEUE_ADDBA 2 +#define IWL_EMPTYING_HW_QUEUE_DELBA 3 + u8 state; +}; + + +struct iwl_tid_data { + u16 seq_number; /* 4965 only */ + u16 tfds_in_queue; + struct iwl_ht_agg agg; +}; + +struct iwl_hw_key { + u32 cipher; + int keylen; + u8 keyidx; + u8 key[32]; +}; + +union iwl_ht_rate_supp { + u16 rates; + struct { + u8 siso_rate; + u8 mimo_rate; + }; +}; + +#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0) +#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1) +#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2) +#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3) +#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K +#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K +#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K + +/* + * Maximal MPDU density for TX aggregation + * 4 - 2us density + * 5 - 4us density + * 6 - 8us density + * 7 - 16us density + */ +#define CFG_HT_MPDU_DENSITY_2USEC (0x4) +#define CFG_HT_MPDU_DENSITY_4USEC (0x5) +#define CFG_HT_MPDU_DENSITY_8USEC (0x6) +#define CFG_HT_MPDU_DENSITY_16USEC (0x7) +#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC +#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC +#define CFG_HT_MPDU_DENSITY_MIN (0x1) + +struct iwl_ht_config { + bool single_chain_sufficient; + enum ieee80211_smps_mode smps; /* current smps mode */ +}; + +/* QoS structures */ +struct iwl_qos_info { + int qos_active; + struct iwl_qosparam_cmd def_qos_parm; +}; + +/* + * Structure should be accessed with sta_lock held. When station addition + * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only + * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without + * sta_lock held. + */ +struct iwl_station_entry { + struct iwl_legacy_addsta_cmd sta; + struct iwl_tid_data tid[MAX_TID_COUNT]; + u8 used, ctxid; + struct iwl_hw_key keyinfo; + struct iwl_link_quality_cmd *lq; +}; + +struct iwl_station_priv_common { + struct iwl_rxon_context *ctx; + u8 sta_id; +}; + +/* + * iwl_station_priv: Driver's private station information + * + * When mac80211 creates a station it reserves some space (hw->sta_data_size) + * in the structure for use by driver. This structure is places in that + * space. + * + * The common struct MUST be first because it is shared between + * 3945 and 4965! + */ +struct iwl_station_priv { + struct iwl_station_priv_common common; + struct iwl_lq_sta lq_sta; + atomic_t pending_frames; + bool client; + bool asleep; +}; + +/** + * struct iwl_vif_priv - driver's private per-interface information + * + * When mac80211 allocates a virtual interface, it can allocate + * space for us to put data into. 
+ */ +struct iwl_vif_priv { + struct iwl_rxon_context *ctx; + u8 ibss_bssid_sta_id; +}; + +/* one for each uCode image (inst/data, boot/init/runtime) */ +struct fw_desc { + void *v_addr; /* access by driver */ + dma_addr_t p_addr; /* access by card's busmaster DMA */ + u32 len; /* bytes */ +}; + +/* uCode file layout */ +struct iwl_ucode_header { + __le32 ver; /* major/minor/API/serial */ + struct { + __le32 inst_size; /* bytes of runtime code */ + __le32 data_size; /* bytes of runtime data */ + __le32 init_size; /* bytes of init code */ + __le32 init_data_size; /* bytes of init data */ + __le32 boot_size; /* bytes of bootstrap code */ + u8 data[0]; /* in same order as sizes */ + } v1; +}; + +struct iwl4965_ibss_seq { + u8 mac[ETH_ALEN]; + u16 seq_num; + u16 frag_num; + unsigned long packet_time; + struct list_head list; +}; + +struct iwl_sensitivity_ranges { + u16 min_nrg_cck; + u16 max_nrg_cck; + + u16 nrg_th_cck; + u16 nrg_th_ofdm; + + u16 auto_corr_min_ofdm; + u16 auto_corr_min_ofdm_mrc; + u16 auto_corr_min_ofdm_x1; + u16 auto_corr_min_ofdm_mrc_x1; + + u16 auto_corr_max_ofdm; + u16 auto_corr_max_ofdm_mrc; + u16 auto_corr_max_ofdm_x1; + u16 auto_corr_max_ofdm_mrc_x1; + + u16 auto_corr_max_cck; + u16 auto_corr_max_cck_mrc; + u16 auto_corr_min_cck; + u16 auto_corr_min_cck_mrc; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + + +#define KELVIN_TO_CELSIUS(x) ((x)-273) +#define CELSIUS_TO_KELVIN(x) ((x)+273) + + +/** + * struct iwl_hw_params + * @max_txq_num: Max # Tx queues supported + * @dma_chnl_num: Number of Tx DMA/FIFO channels + * @scd_bc_tbls_size: size of scheduler byte count tables + * @tfd_size: TFD size + * @tx/rx_chains_num: Number of TX/RX chains + * @valid_tx/rx_ant: usable antennas + * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) + * @max_rxq_log: Log-base-2 of max_rxq_size + * @rx_page_order: Rx buffer page order + * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR + * @max_stations: + * @ht40_channel: is 40MHz width possible in band 2.4 + * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ) + * @sw_crypto: 0 for hw, 1 for sw + * @max_xxx_size: for ucode uses + * @ct_kill_threshold: temperature threshold + * @beacon_time_tsf_bits: number of valid tsf bits for beacon time + * @struct iwl_sensitivity_ranges: range of sensitivity values + */ +struct iwl_hw_params { + u8 max_txq_num; + u8 dma_chnl_num; + u16 scd_bc_tbls_size; + u32 tfd_size; + u8 tx_chains_num; + u8 rx_chains_num; + u8 valid_tx_ant; + u8 valid_rx_ant; + u16 max_rxq_size; + u16 max_rxq_log; + u32 rx_page_order; + u32 rx_wrt_ptr_reg; + u8 max_stations; + u8 ht40_channel; + u8 max_beacon_itrvl; /* in 1024 ms */ + u32 max_inst_size; + u32 max_data_size; + u32 max_bsm_size; + u32 ct_kill_threshold; /* value in hw-dependent units */ + u16 beacon_time_tsf_bits; + const struct iwl_sensitivity_ranges *sens; +}; + + +/****************************************************************************** + * + * Functions implemented in core module which are forward declared here + * for use by iwl-[4-5].c + * + * NOTE: The implementation of these functions are not hardware specific + * which is why they are in the core module files. 
+ * + * Naming convention -- + * iwl_ <-- Is part of iwlwifi + * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX) + * iwl4965_bg_ <-- Called from work queue context + * iwl4965_mac_ <-- mac80211 callback + * + ****************************************************************************/ +extern void iwl4965_update_chain_flags(struct iwl_priv *priv); +extern const u8 iwlegacy_bcast_addr[ETH_ALEN]; +extern int iwl_legacy_queue_space(const struct iwl_queue *q); +static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i) +{ + return q->write_ptr >= q->read_ptr ? + (i >= q->read_ptr && i < q->write_ptr) : + !(i < q->read_ptr && i >= q->write_ptr); +} + + +static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index, + int is_huge) +{ + /* + * This is for init calibration result and scan command which + * required buffer > TFD_MAX_PAYLOAD_SIZE, + * the big buffer at end of command array + */ + if (is_huge) + return q->n_window; /* must be power of 2 */ + + /* Otherwise, use normal size buffers */ + return index & (q->n_window - 1); +} + + +struct iwl_dma_ptr { + dma_addr_t dma; + void *addr; + size_t size; +}; + +#define IWL_OPERATION_MODE_AUTO 0 +#define IWL_OPERATION_MODE_HT_ONLY 1 +#define IWL_OPERATION_MODE_MIXED 2 +#define IWL_OPERATION_MODE_20MHZ 3 + +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 + +/* Sensitivity and chain noise calibration */ +#define INITIALIZATION_VALUE 0xFFFF +#define IWL4965_CAL_NUM_BEACONS 20 +#define IWL_CAL_NUM_BEACONS 16 +#define MAXIMUM_ALLOWED_PATHLOSS 15 + +#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 + +#define MAX_FA_OFDM 50 +#define MIN_FA_OFDM 5 +#define MAX_FA_CCK 50 +#define MIN_FA_CCK 5 + +#define AUTO_CORR_STEP_OFDM 1 + +#define AUTO_CORR_STEP_CCK 3 +#define AUTO_CORR_MAX_TH_CCK 160 + +#define NRG_DIFF 2 +#define NRG_STEP_CCK 2 +#define NRG_MARGIN 8 +#define MAX_NUMBER_CCK_NO_FA 100 + +#define AUTO_CORR_CCK_MIN_VAL_DEF (125) + +#define CHAIN_A 0 +#define CHAIN_B 1 +#define CHAIN_C 2 +#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4 +#define ALL_BAND_FILTER 0xFF00 +#define IN_BAND_FILTER 0xFF +#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF + +#define NRG_NUM_PREV_STAT_L 20 +#define NUM_RX_CHAINS 3 + +enum iwl4965_false_alarm_state { + IWL_FA_TOO_MANY = 0, + IWL_FA_TOO_FEW = 1, + IWL_FA_GOOD_RANGE = 2, +}; + +enum iwl4965_chain_noise_state { + IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */ + IWL_CHAIN_NOISE_ACCUMULATE, + IWL_CHAIN_NOISE_CALIBRATED, + IWL_CHAIN_NOISE_DONE, +}; + +enum iwl4965_calib_enabled_state { + IWL_CALIB_DISABLED = 0, /* must be 0 */ + IWL_CALIB_ENABLED = 1, +}; + +/* + * enum iwl_calib + * defines the order in which results of initial calibrations + * should be sent to the runtime uCode + */ +enum iwl_calib { + IWL_CALIB_MAX, +}; + +/* Opaque calibration results */ +struct iwl_calib_result { + void *buf; + size_t buf_len; +}; + +enum ucode_type { + UCODE_NONE = 0, + UCODE_INIT, + UCODE_RT +}; + +/* Sensitivity calib data */ +struct iwl_sensitivity_data { + u32 auto_corr_ofdm; + u32 auto_corr_ofdm_mrc; + u32 auto_corr_ofdm_x1; + u32 auto_corr_ofdm_mrc_x1; + u32 auto_corr_cck; + u32 auto_corr_cck_mrc; + + u32 last_bad_plcp_cnt_ofdm; + u32 last_fa_cnt_ofdm; + u32 last_bad_plcp_cnt_cck; + u32 last_fa_cnt_cck; + + u32 nrg_curr_state; + u32 nrg_prev_state; + u32 nrg_value[10]; + u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L]; + u32 nrg_silence_ref; + u32 nrg_energy_idx; + u32 nrg_silence_idx; + u32 nrg_th_cck; + s32 nrg_auto_corr_silence_diff; + u32 
num_in_cck_no_fa; + u32 nrg_th_ofdm; + + u16 barker_corr_th_min; + u16 barker_corr_th_min_mrc; + u16 nrg_th_cca; +}; + +/* Chain noise (differential Rx gain) calib data */ +struct iwl_chain_noise_data { + u32 active_chains; + u32 chain_noise_a; + u32 chain_noise_b; + u32 chain_noise_c; + u32 chain_signal_a; + u32 chain_signal_b; + u32 chain_signal_c; + u16 beacon_count; + u8 disconn_array[NUM_RX_CHAINS]; + u8 delta_gain_code[NUM_RX_CHAINS]; + u8 radio_write; + u8 state; +}; + +#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */ +#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + +#define IWL_TRAFFIC_ENTRIES (256) +#define IWL_TRAFFIC_ENTRY_SIZE (64) + +enum { + MEASUREMENT_READY = (1 << 0), + MEASUREMENT_ACTIVE = (1 << 1), +}; + +/* interrupt statistics */ +struct isr_statistics { + u32 hw; + u32 sw; + u32 err_code; + u32 sch; + u32 alive; + u32 rfkill; + u32 ctkill; + u32 wakeup; + u32 rx; + u32 rx_handlers[REPLY_MAX]; + u32 tx; + u32 unhandled; +}; + +/* management statistics */ +enum iwl_mgmt_stats { + MANAGEMENT_ASSOC_REQ = 0, + MANAGEMENT_ASSOC_RESP, + MANAGEMENT_REASSOC_REQ, + MANAGEMENT_REASSOC_RESP, + MANAGEMENT_PROBE_REQ, + MANAGEMENT_PROBE_RESP, + MANAGEMENT_BEACON, + MANAGEMENT_ATIM, + MANAGEMENT_DISASSOC, + MANAGEMENT_AUTH, + MANAGEMENT_DEAUTH, + MANAGEMENT_ACTION, + MANAGEMENT_MAX, +}; +/* control statistics */ +enum iwl_ctrl_stats { + CONTROL_BACK_REQ = 0, + CONTROL_BACK, + CONTROL_PSPOLL, + CONTROL_RTS, + CONTROL_CTS, + CONTROL_ACK, + CONTROL_CFEND, + CONTROL_CFENDACK, + CONTROL_MAX, +}; + +struct traffic_stats { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + u32 mgmt[MANAGEMENT_MAX]; + u32 ctrl[CONTROL_MAX]; + u32 data_cnt; + u64 data_bytes; +#endif +}; + +/* + * iwl_switch_rxon: "channel switch" structure + * + * @ switch_in_progress: channel switch in progress + * @ channel: new channel + */ +struct iwl_switch_rxon { + bool switch_in_progress; + __le16 channel; +}; + +/* + * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds + * to perform continuous uCode event logging operation if enabled + */ +#define UCODE_TRACE_PERIOD (100) + +/* + * iwl_event_log: current uCode event log position + * + * @ucode_trace: enable/disable ucode continuous trace timer + * @num_wraps: how many times the event buffer wraps + * @next_entry: the entry just before the next one that uCode would fill + * @non_wraps_count: counter for no wrap detected when dump ucode events + * @wraps_once_count: counter for wrap once detected when dump ucode events + * @wraps_more_count: counter for wrap more than once detected + * when dump ucode events + */ +struct iwl_event_log { + bool ucode_trace; + u32 num_wraps; + u32 next_entry; + int non_wraps_count; + int wraps_once_count; + int wraps_more_count; +}; + +/* + * host interrupt timeout value + * used with setting interrupt coalescing timer + * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit + * + * default interrupt coalescing timer is 64 x 32 = 2048 usecs + * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs + */ +#define IWL_HOST_INT_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_TIMEOUT_DEF (0x40) +#define IWL_HOST_INT_TIMEOUT_MIN (0x0) +#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF) +#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10) +#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0) + +/* + * This is the threshold value of plcp error rate per 100mSecs. It is + * used to set and check for the validity of plcp_delta. 
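[Editor's note] The IWL_MAX_PLCP_ERR_* thresholds defined just below are compared against a PLCP error count normalized to a per-100-ms rate, as the comment above describes. A sketch of such a check, with illustrative names only (the patch's actual bookkeeping lives in its statistics handling):

	/* Editorial sketch, not from the patch: applying a plcp_delta
	 * threshold.  'threshold' would be one of the IWL_MAX_PLCP_ERR_*
	 * values defined below; all names here are illustrative. */
	static bool example_plcp_rate_too_high(u32 plcp_err_delta,
					       u32 elapsed_msecs, u32 threshold)
	{
		/* threshold 0 == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE */
		if (threshold == 0 || elapsed_msecs == 0)
			return false;

		/* normalize the error count to a per-100-ms rate */
		return (plcp_err_delta * 100) / elapsed_msecs > threshold;
	}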
+ */ +#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1) +#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50) +#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100) +#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200) +#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255) +#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0) + +#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) +#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) + +/* TX queue watchdog timeouts in mSecs */ +#define IWL_DEF_WD_TIMEOUT (2000) +#define IWL_LONG_WD_TIMEOUT (10000) +#define IWL_MAX_WD_TIMEOUT (120000) + +enum iwl_reset { + IWL_RF_RESET = 0, + IWL_FW_RESET, + IWL_MAX_FORCE_RESET, +}; + +struct iwl_force_reset { + int reset_request_count; + int reset_success_count; + int reset_reject_count; + unsigned long reset_duration; + unsigned long last_force_reset_jiffies; +}; + +/* extend beacon time format bit shifting */ +/* + * for _3945 devices + * bits 31:24 - extended + * bits 23:0 - interval + */ +#define IWL3945_EXT_BEACON_TIME_POS 24 +/* + * for _4965 devices + * bits 31:22 - extended + * bits 21:0 - interval + */ +#define IWL4965_EXT_BEACON_TIME_POS 22 + +enum iwl_rxon_context_id { + IWL_RXON_CTX_BSS, + + NUM_IWL_RXON_CTX +}; + +struct iwl_rxon_context { + struct ieee80211_vif *vif; + + const u8 *ac_to_fifo; + const u8 *ac_to_queue; + u8 mcast_queue; + + /* + * We could use the vif to indicate active, but we + * also need it to be active during disabling when + * we already removed the vif for type setting. + */ + bool always_active, is_active; + + bool ht_need_multiple_chains; + + enum iwl_rxon_context_id ctxid; + + u32 interface_modes, exclusive_interface_modes; + u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype; + + /* + * We declare this const so it can only be + * changed via explicit cast within the + * routines that actually update the physical + * hardware. + */ + const struct iwl_legacy_rxon_cmd active; + struct iwl_legacy_rxon_cmd staging; + + struct iwl_rxon_time_cmd timing; + + struct iwl_qos_info qos_data; + + u8 bcast_sta_id, ap_sta_id; + + u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd; + u8 qos_cmd; + u8 wep_key_cmd; + + struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; + u8 key_mapping_keys; + + __le32 station_flags; + + struct { + bool non_gf_sta_present; + u8 protection; + bool enabled, is_40mhz; + u8 extension_chan_offset; + } ht; +}; + +struct iwl_priv { + + /* ieee device used by generic ieee processing code */ + struct ieee80211_hw *hw; + struct ieee80211_channel *ieee_channels; + struct ieee80211_rate *ieee_rates; + struct iwl_cfg *cfg; + + /* temporary frame storage list */ + struct list_head free_frames; + int frames_count; + + enum ieee80211_band band; + int alloc_rxb_page; + + void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb); + + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + + /* spectrum measurement report caching */ + struct iwl_spectrum_notification measure_report; + u8 measurement_status; + + /* ucode beacon time */ + u32 ucode_beacon_time; + int missed_beacon_threshold; + + /* track IBSS manager (last beacon) status */ + u32 ibss_manager; + + /* storing the jiffies when the plcp error rate is received */ + unsigned long plcp_jiffies; + + /* force reset */ + struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; + + /* we allocate array of iwl_channel_info for NIC's valid channels. 
+ * Access via channel # using indirect index array */ + struct iwl_channel_info *channel_info; /* channel info array */ + u8 channel_count; /* # of channels */ + + /* thermal calibration */ + s32 temperature; /* degrees Kelvin */ + s32 last_temperature; + + /* init calibration results */ + struct iwl_calib_result calib_results[IWL_CALIB_MAX]; + + /* Scan related variables */ + unsigned long scan_start; + unsigned long scan_start_tsf; + void *scan_cmd; + enum ieee80211_band scan_band; + struct cfg80211_scan_request *scan_request; + struct ieee80211_vif *scan_vif; + bool is_internal_short_scan; + u8 scan_tx_ant[IEEE80211_NUM_BANDS]; + u8 mgmt_tx_ant; + + /* spinlock */ + spinlock_t lock; /* protect general shared data */ + spinlock_t hcmd_lock; /* protect hcmd */ + spinlock_t reg_lock; /* protect hw register access */ + struct mutex mutex; + struct mutex sync_cmd_mutex; /* enable serialization of sync commands */ + + /* basic pci-network driver stuff */ + struct pci_dev *pci_dev; + + /* pci hardware address support */ + void __iomem *hw_base; + u32 hw_rev; + u32 hw_wa_rev; + u8 rev_id; + + /* microcode/device supports multiple contexts */ + u8 valid_contexts; + + /* command queue number */ + u8 cmd_queue; + + /* max number of station keys */ + u8 sta_key_max_num; + + /* EEPROM MAC addresses */ + struct mac_address addresses[1]; + + /* uCode images, save to reload in case of failure */ + int fw_index; /* firmware we're trying to load */ + u32 ucode_ver; /* version of ucode, copy of + iwl_ucode.ver */ + struct fw_desc ucode_code; /* runtime inst */ + struct fw_desc ucode_data; /* runtime data original */ + struct fw_desc ucode_data_backup; /* runtime data save/restore */ + struct fw_desc ucode_init; /* initialization inst */ + struct fw_desc ucode_init_data; /* initialization data */ + struct fw_desc ucode_boot; /* bootstrap inst */ + enum ucode_type ucode_type; + u8 ucode_write_complete; /* the image write is complete */ + char firmware_name[25]; + + struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; + + struct iwl_switch_rxon switch_rxon; + + /* 1st responses from initialize and runtime uCode images. + * _4965's initialize alive response contains some calibration data. 
*/ + struct iwl_init_alive_resp card_alive_init; + struct iwl_alive_resp card_alive; + + u16 active_rate; + + u8 start_calib; + struct iwl_sensitivity_data sensitivity_data; + struct iwl_chain_noise_data chain_noise_data; + __le16 sensitivity_tbl[HD_TABLE_SIZE]; + + struct iwl_ht_config current_ht_config; + + /* Rate scaling data */ + u8 retry_rate; + + wait_queue_head_t wait_command_queue; + + int activity_timer_active; + + /* Rx and Tx DMA processing queues */ + struct iwl_rx_queue rxq; + struct iwl_tx_queue *txq; + unsigned long txq_ctx_active_msk; + struct iwl_dma_ptr kw; /* keep warm address */ + struct iwl_dma_ptr scd_bc_tbls; + + u32 scd_base_addr; /* scheduler sram base address */ + + unsigned long status; + + /* counts mgmt, ctl, and data packets */ + struct traffic_stats tx_stats; + struct traffic_stats rx_stats; + + /* counts interrupts */ + struct isr_statistics isr_stats; + + struct iwl_power_mgr power_data; + + /* context information */ + u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */ + + /* station table variables */ + + /* Note: if lock and sta_lock are needed, lock must be acquired first */ + spinlock_t sta_lock; + int num_stations; + struct iwl_station_entry stations[IWL_STATION_COUNT]; + unsigned long ucode_key_table; + + /* queue refcounts */ +#define IWL_MAX_HW_QUEUES 32 + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; + /* for each AC */ + atomic_t queue_stop_count[4]; + + /* Indication if ieee80211_ops->open has been called */ + u8 is_open; + + u8 mac80211_registered; + + /* eeprom -- this is in the card's little endian byte order */ + u8 *eeprom; + struct iwl_eeprom_calib_info *calib_info; + + enum nl80211_iftype iw_mode; + + /* Last Rx'd beacon timestamp */ + u64 timestamp; + + union { +#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE) + struct { + void *shared_virt; + dma_addr_t shared_phys; + + struct delayed_work thermal_periodic; + struct delayed_work rfkill_poll; + + struct iwl3945_notif_statistics statistics; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + struct iwl3945_notif_statistics accum_statistics; + struct iwl3945_notif_statistics delta_statistics; + struct iwl3945_notif_statistics max_delta; +#endif + + u32 sta_supp_rates; + int last_rx_rssi; /* From Rx packet statistics */ + + /* Rx'd packet timing information */ + u32 last_beacon_time; + u64 last_tsf; + + /* + * each calibration channel group in the + * EEPROM has a derived clip setting for + * each rate. + */ + const struct iwl3945_clip_group clip_groups[5]; + + } _3945; +#endif +#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE) + struct { + /* + * reporting the number of tids has AGG on. 
0 means + * no AGGREGATION + */ + u8 agg_tids_count; + + struct iwl_rx_phy_res last_phy_res; + bool last_phy_res_valid; + + struct completion firmware_loading_complete; + + /* + * chain noise reset and gain commands are the + * two extra calibration commands follows the standard + * phy calibration commands + */ + u8 phy_calib_chain_noise_reset_cmd; + u8 phy_calib_chain_noise_gain_cmd; + + struct iwl_notif_statistics statistics; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + struct iwl_notif_statistics accum_statistics; + struct iwl_notif_statistics delta_statistics; + struct iwl_notif_statistics max_delta; +#endif + + } _4965; +#endif + }; + + struct iwl_hw_params hw_params; + + u32 inta_mask; + + struct workqueue_struct *workqueue; + + struct work_struct restart; + struct work_struct scan_completed; + struct work_struct rx_replenish; + struct work_struct abort_scan; + + struct iwl_rxon_context *beacon_ctx; + struct sk_buff *beacon_skb; + + struct work_struct start_internal_scan; + struct work_struct tx_flush; + + struct tasklet_struct irq_tasklet; + + struct delayed_work init_alive_start; + struct delayed_work alive_start; + struct delayed_work scan_check; + + /* TX Power */ + s8 tx_power_user_lmt; + s8 tx_power_device_lmt; + s8 tx_power_next; + + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + /* debugging info */ + u32 debug_level; /* per device debugging will override global + iwlegacy_debug_level if set */ +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS + /* debugfs */ + u16 tx_traffic_idx; + u16 rx_traffic_idx; + u8 *tx_traffic; + u8 *rx_traffic; + struct dentry *debugfs_dir; + u32 dbgfs_sram_offset, dbgfs_sram_len; + bool disable_ht40; +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */ + + struct work_struct txpower_work; + u32 disable_sens_cal; + u32 disable_chain_noise_cal; + u32 disable_tx_power_cal; + struct work_struct run_time_calib_work; + struct timer_list statistics_periodic; + struct timer_list ucode_trace; + struct timer_list watchdog; + bool hw_ready; + + struct iwl_event_log event_log; + + struct led_classdev led; + unsigned long blink_on, blink_off; + bool led_registered; +}; /*iwl_priv */ + +static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) +{ + set_bit(txq_id, &priv->txq_ctx_active_msk); +} + +static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id) +{ + clear_bit(txq_id, &priv->txq_ctx_active_msk); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +/* + * iwl_legacy_get_debug_level: Return active debug level for device + * + * Using sysfs it is possible to set per device debug level. This debug + * level will be used if set, otherwise the global debug level which can be + * set via module parameter is used. + */ +static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv) +{ + if (priv->debug_level) + return priv->debug_level; + else + return iwlegacy_debug_level; +} +#else +static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv) +{ + return iwlegacy_debug_level; +} +#endif + + +static inline struct ieee80211_hdr * +iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv, + int txq_id, int idx) +{ + if (priv->txq[txq_id].txb[idx].skb) + return (struct ieee80211_hdr *)priv->txq[txq_id]. 
+ txb[idx].skb->data; + return NULL; +} + +static inline struct iwl_rxon_context * +iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif) +{ + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + return vif_priv->ctx; +} + +#define for_each_context(priv, ctx) \ + for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \ + ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ + if (priv->valid_contexts & BIT(ctx->ctxid)) + +static inline int iwl_legacy_is_associated(struct iwl_priv *priv, + enum iwl_rxon_context_id ctxid) +{ + return (priv->contexts[ctxid].active.filter_flags & + RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv) +{ + return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS); +} + +static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx) +{ + return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; +} + +static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info) +{ + if (ch_info == NULL) + return 0; + return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; +} + +static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info) +{ + return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; +} + +static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info) +{ + return ch_info->band == IEEE80211_BAND_5GHZ; +} + +static inline int +iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch) +{ + return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; +} + +static inline void +__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) +{ + __free_pages(page, priv->hw_params.rx_page_order); + priv->alloc_rxb_page--; +} + +static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page) +{ + free_pages(page, priv->hw_params.rx_page_order); + priv->alloc_rxb_page--; +} +#endif /* __iwl_legacy_dev_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c new file mode 100644 index 000000000000..080b852b33bd --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.c @@ -0,0 +1,45 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/module.h> + +/* sparse doesn't like tracepoint macros */ +#ifndef __CHECKER__ +#include "iwl-dev.h" + +#define CREATE_TRACE_POINTS +#include "iwl-devtrace.h" + +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_event); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_cont_event); +EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_wrap_event); +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h new file mode 100644 index 000000000000..9612aa0f6ec4 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-devtrace.h @@ -0,0 +1,270 @@ +/****************************************************************************** + * + * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ) +#define __IWLWIFI_LEGACY_DEVICE_TRACE + +#include <linux/tracepoint.h> + +#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__) +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) 
\ +static inline void trace_ ## name(proto) {} +#endif + + +#define PRIV_ENTRY __field(struct iwl_priv *, priv) +#define PRIV_ASSIGN (__entry->priv = priv) + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_legacy_io + +TRACE_EVENT(iwlwifi_legacy_dev_ioread32, + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] read io[%#x] = %#x", __entry->priv, + __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_legacy_dev_iowrite8, + TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u8, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, + __entry->offs, __entry->val) +); + +TRACE_EVENT(iwlwifi_legacy_dev_iowrite32, + TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val), + TP_ARGS(priv, offs, val), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, offs) + __field(u32, val) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, + __entry->offs, __entry->val) +); + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi_legacy_ucode + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_cont_event, + TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), + TP_ARGS(priv, time, data, ev), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, time) + __field(u32, data) + __field(u32, ev) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->time = time; + __entry->data = data; + __entry->ev = ev; + ), + TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u", + __entry->priv, __entry->time, __entry->data, __entry->ev) +); + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_wrap_event, + TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry), + TP_ARGS(priv, wraps, n_entry, p_entry), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, wraps) + __field(u32, n_entry) + __field(u32, p_entry) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->wraps = wraps; + __entry->n_entry = n_entry; + __entry->p_entry = p_entry; + ), + TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X", + __entry->priv, __entry->wraps, __entry->n_entry, + __entry->p_entry) +); + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM iwlwifi + +TRACE_EVENT(iwlwifi_legacy_dev_hcmd, + TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags), + TP_ARGS(priv, hcmd, len, flags), + TP_STRUCT__entry( + PRIV_ENTRY + __dynamic_array(u8, hcmd, len) + __field(u32, flags) + ), + TP_fast_assign( + PRIV_ASSIGN; + memcpy(__get_dynamic_array(hcmd), hcmd, len); + __entry->flags = flags; + ), + TP_printk("[%p] hcmd %#.2x (%ssync)", + __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0], + __entry->flags & CMD_ASYNC ? 
"a" : "") +); + +TRACE_EVENT(iwlwifi_legacy_dev_rx, + TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len), + TP_ARGS(priv, rxbuf, len), + TP_STRUCT__entry( + PRIV_ENTRY + __dynamic_array(u8, rxbuf, len) + ), + TP_fast_assign( + PRIV_ASSIGN; + memcpy(__get_dynamic_array(rxbuf), rxbuf, len); + ), + TP_printk("[%p] RX cmd %#.2x", + __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4]) +); + +TRACE_EVENT(iwlwifi_legacy_dev_tx, + TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen, + void *buf0, size_t buf0_len, + void *buf1, size_t buf1_len), + TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(size_t, framelen) + __dynamic_array(u8, tfd, tfdlen) + + /* + * Do not insert between or below these items, + * we want to keep the frame together (except + * for the possible padding). + */ + __dynamic_array(u8, buf0, buf0_len) + __dynamic_array(u8, buf1, buf1_len) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->framelen = buf0_len + buf1_len; + memcpy(__get_dynamic_array(tfd), tfd, tfdlen); + memcpy(__get_dynamic_array(buf0), buf0, buf0_len); + memcpy(__get_dynamic_array(buf1), buf1, buf1_len); + ), + TP_printk("[%p] TX %.2x (%zu bytes)", + __entry->priv, + ((u8 *)__get_dynamic_array(buf0))[0], + __entry->framelen) +); + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_error, + TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time, + u32 data1, u32 data2, u32 line, u32 blink1, + u32 blink2, u32 ilink1, u32 ilink2), + TP_ARGS(priv, desc, time, data1, data2, line, + blink1, blink2, ilink1, ilink2), + TP_STRUCT__entry( + PRIV_ENTRY + __field(u32, desc) + __field(u32, time) + __field(u32, data1) + __field(u32, data2) + __field(u32, line) + __field(u32, blink1) + __field(u32, blink2) + __field(u32, ilink1) + __field(u32, ilink2) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->desc = desc; + __entry->time = time; + __entry->data1 = data1; + __entry->data2 = data2; + __entry->line = line; + __entry->blink1 = blink1; + __entry->blink2 = blink2; + __entry->ilink1 = ilink1; + __entry->ilink2 = ilink2; + ), + TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, " + "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X", + __entry->priv, __entry->desc, __entry->time, __entry->data1, + __entry->data2, __entry->line, __entry->blink1, + __entry->blink2, __entry->ilink1, __entry->ilink2) +); + +TRACE_EVENT(iwlwifi_legacy_dev_ucode_event, + TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev), + TP_ARGS(priv, time, data, ev), + TP_STRUCT__entry( + PRIV_ENTRY + + __field(u32, time) + __field(u32, data) + __field(u32, ev) + ), + TP_fast_assign( + PRIV_ASSIGN; + __entry->time = time; + __entry->data = data; + __entry->ev = ev; + ), + TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u", + __entry->priv, __entry->time, __entry->data, __entry->ev) +); +#endif /* __IWLWIFI_DEVICE_TRACE */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE iwl-devtrace +#include <trace/define_trace.h> diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c new file mode 100644 index 000000000000..04c5648027df --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.c @@ -0,0 +1,561 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +#include <net/mac80211.h> + +#include "iwl-commands.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-debug.h" +#include "iwl-eeprom.h" +#include "iwl-io.h" + +/************************** EEPROM BANDS **************************** + * + * The iwlegacy_eeprom_band definitions below provide the mapping from the + * EEPROM contents to the specific channel number supported for each + * band. + * + * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 + * definition below maps to physical channel 42 in the 5.2GHz spectrum. + * The specific geography and calibration information for that channel + * is contained in the eeprom map itself. 
+ * + * During init, we copy the eeprom information and channel map + * information into priv->channel_info_24/52 and priv->channel_map_24/52 + * + * channel_map_24/52 provides the index in the channel_info array for a + * given channel. We have to have two separate maps as there is channel + * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and + * band_2 + * + * A value of 0xff stored in the channel_map indicates that the channel + * is not supported by the hardware at all. + * + * A value of 0xfe in the channel_map indicates that the channel is not + * valid for Tx with the current hardware. This means that + * while the system can tune and receive on a given channel, it may not + * be able to associate or transmit any frames on that + * channel. There is no corresponding channel information for that + * entry. + * + *********************************************************************/ + +/* 2.4 GHz */ +const u8 iwlegacy_eeprom_band_1[14] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 +}; + +/* 5.2 GHz bands */ +static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */ + 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 +}; + +static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */ + 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 +}; + +static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */ + 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */ + 145, 149, 153, 157, 161, 165 +}; + +static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */ + 1, 2, 3, 4, 5, 6, 7 +}; + +static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */ + 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 +}; + +/****************************************************************************** + * + * EEPROM related functions + * +******************************************************************************/ + +static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv) +{ + u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; + int ret = 0; + + IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp); + switch (gp) { + case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: + case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: + break; + default: + IWL_ERR(priv, "bad EEPROM signature," + "EEPROM_GP=0x%08x\n", gp); + ret = -ENOENT; + break; + } + return ret; +} + +const u8 +*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) +{ + BUG_ON(offset >= priv->cfg->base_params->eeprom_size); + return &priv->eeprom[offset]; +} +EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr); + +u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset) +{ + if (!priv->eeprom) + return 0; + return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); +} +EXPORT_SYMBOL(iwl_legacy_eeprom_query16); + +/** + * iwl_legacy_eeprom_init - read EEPROM contents + * + * Load the EEPROM contents from adapter into priv->eeprom + * + * NOTE: This routine uses the non-debug IO access functions. 
+ */ +int iwl_legacy_eeprom_init(struct iwl_priv *priv) +{ + __le16 *e; + u32 gp = iwl_read32(priv, CSR_EEPROM_GP); + int sz; + int ret; + u16 addr; + + /* allocate eeprom */ + sz = priv->cfg->base_params->eeprom_size; + IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz); + priv->eeprom = kzalloc(sz, GFP_KERNEL); + if (!priv->eeprom) { + ret = -ENOMEM; + goto alloc_err; + } + e = (__le16 *)priv->eeprom; + + priv->cfg->ops->lib->apm_ops.init(priv); + + ret = iwl_legacy_eeprom_verify_signature(priv); + if (ret < 0) { + IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); + ret = -ENOENT; + goto err; + } + + /* Make sure driver (instead of uCode) is allowed to read EEPROM */ + ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv); + if (ret < 0) { + IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n"); + ret = -ENOENT; + goto err; + } + + /* eeprom is an array of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + u32 r; + + _iwl_legacy_write32(priv, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + + ret = iwl_poll_bit(priv, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(priv, "Time out reading EEPROM[%d]\n", + addr); + goto done; + } + r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG); + e[addr / 2] = cpu_to_le16(r >> 16); + } + + IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n", + "EEPROM", + iwl_legacy_eeprom_query16(priv, EEPROM_VERSION)); + + ret = 0; +done: + priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); + +err: + if (ret) + iwl_legacy_eeprom_free(priv); + /* Reset chip to save power until we load uCode during "up". */ + iwl_legacy_apm_stop(priv); +alloc_err: + return ret; +} +EXPORT_SYMBOL(iwl_legacy_eeprom_init); + +void iwl_legacy_eeprom_free(struct iwl_priv *priv) +{ + kfree(priv->eeprom); + priv->eeprom = NULL; +} +EXPORT_SYMBOL(iwl_legacy_eeprom_free); + +static void iwl_legacy_init_band_reference(const struct iwl_priv *priv, + int eep_band, int *eeprom_ch_count, + const struct iwl_eeprom_channel **eeprom_ch_info, + const u8 **eeprom_ch_index) +{ + u32 offset = priv->cfg->ops->lib-> + eeprom_ops.regulatory_bands[eep_band - 1]; + switch (eep_band) { + case 1: /* 2.4GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_1; + break; + case 2: /* 4.9GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_2; + break; + case 3: /* 5.2GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_3; + break; + case 4: /* 5.5GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_4; + break; + case 5: /* 5.7GHz band */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_5; + break; + case 6: /* 2.4GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6); + *eeprom_ch_info = (struct 
iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_6; + break; + case 7: /* 5 GHz ht40 channels */ + *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7); + *eeprom_ch_info = (struct iwl_eeprom_channel *) + iwl_legacy_eeprom_query_addr(priv, offset); + *eeprom_ch_index = iwlegacy_eeprom_band_7; + break; + default: + BUG(); + return; + } +} + +#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") +/** + * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv. + * + * Does not set up a command, or touch hardware. + */ +static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv, + enum ieee80211_band band, u16 channel, + const struct iwl_eeprom_channel *eeprom_ch, + u8 clear_ht40_extension_channel) +{ + struct iwl_channel_info *ch_info; + + ch_info = (struct iwl_channel_info *) + iwl_legacy_get_channel_info(priv, band, channel); + + if (!iwl_legacy_is_channel_valid(ch_info)) + return -1; + + IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", + ch_info->channel, + iwl_legacy_is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT(IBSS), + CHECK_AND_PRINT(ACTIVE), + CHECK_AND_PRINT(RADAR), + CHECK_AND_PRINT(WIDE), + CHECK_AND_PRINT(DFS), + eeprom_ch->flags, + eeprom_ch->max_power_avg, + ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? + "" : "not "); + + ch_info->ht40_eeprom = *eeprom_ch; + ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; + ch_info->ht40_flags = eeprom_ch->flags; + if (eeprom_ch->flags & EEPROM_CHANNEL_VALID) + ch_info->ht40_extension_channel &= + ~clear_ht40_extension_channel; + + return 0; +} + +#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ + ? # x " " : "") + +/** + * iwl_legacy_init_channel_map - Set up driver's info for all possible channels + */ +int iwl_legacy_init_channel_map(struct iwl_priv *priv) +{ + int eeprom_ch_count = 0; + const u8 *eeprom_ch_index = NULL; + const struct iwl_eeprom_channel *eeprom_ch_info = NULL; + int band, ch; + struct iwl_channel_info *ch_info; + + if (priv->channel_count) { + IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n"); + return 0; + } + + IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n"); + + priv->channel_count = + ARRAY_SIZE(iwlegacy_eeprom_band_1) + + ARRAY_SIZE(iwlegacy_eeprom_band_2) + + ARRAY_SIZE(iwlegacy_eeprom_band_3) + + ARRAY_SIZE(iwlegacy_eeprom_band_4) + + ARRAY_SIZE(iwlegacy_eeprom_band_5); + + IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n", + priv->channel_count); + + priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * + priv->channel_count, GFP_KERNEL); + if (!priv->channel_info) { + IWL_ERR(priv, "Could not allocate channel_info\n"); + priv->channel_count = 0; + return -ENOMEM; + } + + ch_info = priv->channel_info; + + /* Loop through the 5 EEPROM bands adding them in order to the + * channel map we maintain (that contains additional information than + * what just in the EEPROM) */ + for (band = 1; band <= 5; band++) { + + iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + ch_info->channel = eeprom_ch_index[ch]; + ch_info->band = (band == 1) ? 
IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ; + + /* permanently store EEPROM's channel regulatory flags + * and max power in channel info database. */ + ch_info->eeprom = eeprom_ch_info[ch]; + + /* Copy the run-time flags so they are there even on + * invalid channels */ + ch_info->flags = eeprom_ch_info[ch].flags; + /* First write that ht40 is not enabled, and then enable + * one by one */ + ch_info->ht40_extension_channel = + IEEE80211_CHAN_NO_HT40; + + if (!(iwl_legacy_is_channel_valid(ch_info))) { + IWL_DEBUG_EEPROM(priv, + "Ch. %d Flags %x [%sGHz] - " + "No traffic\n", + ch_info->channel, + ch_info->flags, + iwl_legacy_is_channel_a_band(ch_info) ? + "5.2" : "2.4"); + ch_info++; + continue; + } + + /* Initialize regulatory-based run-time data */ + ch_info->max_power_avg = ch_info->curr_txpow = + eeprom_ch_info[ch].max_power_avg; + ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; + ch_info->min_power = 0; + + IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] " + "%s%s%s%s%s%s(0x%02x %ddBm):" + " Ad-Hoc %ssupported\n", + ch_info->channel, + iwl_legacy_is_channel_a_band(ch_info) ? + "5.2" : "2.4", + CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), + CHECK_AND_PRINT_I(ACTIVE), + CHECK_AND_PRINT_I(RADAR), + CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(DFS), + eeprom_ch_info[ch].flags, + eeprom_ch_info[ch].max_power_avg, + ((eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_IBSS) + && !(eeprom_ch_info[ch]. + flags & EEPROM_CHANNEL_RADAR)) + ? "" : "not "); + + /* Set the tx_power_user_lmt to the highest power + * supported by any channel */ + if (eeprom_ch_info[ch].max_power_avg > + priv->tx_power_user_lmt) + priv->tx_power_user_lmt = + eeprom_ch_info[ch].max_power_avg; + + ch_info++; + } + } + + /* Check if we do have HT40 channels */ + if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] == + EEPROM_REGULATORY_BAND_NO_HT40 && + priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] == + EEPROM_REGULATORY_BAND_NO_HT40) + return 0; + + /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */ + for (band = 6; band <= 7; band++) { + enum ieee80211_band ieeeband; + + iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count, + &eeprom_ch_info, &eeprom_ch_index); + + /* EEPROM band 6 is 2.4, band 7 is 5 GHz */ + ieeeband = + (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + + /* Loop through each band adding each of the channels */ + for (ch = 0; ch < eeprom_ch_count; ch++) { + /* Set up driver's info for lower half */ + iwl_legacy_mod_ht40_chan_info(priv, ieeeband, + eeprom_ch_index[ch], + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40PLUS); + + /* Set up driver's info for upper half */ + iwl_legacy_mod_ht40_chan_info(priv, ieeeband, + eeprom_ch_index[ch] + 4, + &eeprom_ch_info[ch], + IEEE80211_CHAN_NO_HT40MINUS); + } + } + + return 0; +} +EXPORT_SYMBOL(iwl_legacy_init_channel_map); + +/* + * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map + */ +void iwl_legacy_free_channel_map(struct iwl_priv *priv) +{ + kfree(priv->channel_info); + priv->channel_count = 0; +} +EXPORT_SYMBOL(iwl_legacy_free_channel_map); + +/** + * iwl_legacy_get_channel_info - Find driver's private channel info + * + * Based on band and channel number. 
+ */ +const struct +iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv, + enum ieee80211_band band, u16 channel) +{ + int i; + + switch (band) { + case IEEE80211_BAND_5GHZ: + for (i = 14; i < priv->channel_count; i++) { + if (priv->channel_info[i].channel == channel) + return &priv->channel_info[i]; + } + break; + case IEEE80211_BAND_2GHZ: + if (channel >= 1 && channel <= 14) + return &priv->channel_info[channel - 1]; + break; + default: + BUG(); + } + + return NULL; +} +EXPORT_SYMBOL(iwl_legacy_get_channel_info); diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h new file mode 100644 index 000000000000..c59c81002022 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-eeprom.h @@ -0,0 +1,344 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_legacy_eeprom_h__ +#define __iwl_legacy_eeprom_h__ + +#include <net/mac80211.h> + +struct iwl_priv; + +/* + * EEPROM access time values: + * + * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. + * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). + * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. + * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. + */ +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ + +#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + + +/* + * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags. + * + * IBSS and/or AP operation is allowed *only* on those channels with + * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because + * RADAR detection is not supported by the 4965 driver, but is a + * requirement for establishing a new network for legal operation on channels + * requiring RADAR detection or restricting ACTIVE scanning. + * + * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels. + * It only indicates that 20 MHz channel use is supported; HT40 channel + * usage is indicated by a separate set of regulatory flags for each + * HT40 channel pair. + * + * NOTE: Using a channel inappropriately will result in a uCode error! + */ +#define IWL_NUM_TX_CALIB_GROUPS 5 +enum { + EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */ + EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */ + /* Bit 2 Reserved */ + EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */ + EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */ + EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */ + /* Bit 6 Reserved (was Narrow Channel) */ + EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */ +}; + +/* SKU Capabilities */ +/* 3945 only */ +#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) +#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) + +/* *regulatory* channel data format in eeprom, one for each channel. + * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. 
*/ +struct iwl_eeprom_channel { + u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */ + s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ +} __packed; + +/* 3945 Specific */ +#define EEPROM_3945_EEPROM_VERSION (0x2f) + +/* 4965 has two radio transmitters (and 3 radio receivers) */ +#define EEPROM_TX_POWER_TX_CHAINS (2) + +/* 4965 has room for up to 8 sets of txpower calibration data */ +#define EEPROM_TX_POWER_BANDS (8) + +/* 4965 factory calibration measures txpower gain settings for + * each of 3 target output levels */ +#define EEPROM_TX_POWER_MEASUREMENTS (3) + +/* 4965 Specific */ +/* 4965 driver does not work with txpower calibration version < 5 */ +#define EEPROM_4965_TX_POWER_VERSION (5) +#define EEPROM_4965_EEPROM_VERSION (0x2f) +#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */ +#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */ +#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */ +#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */ + +/* 2.4 GHz */ +extern const u8 iwlegacy_eeprom_band_1[14]; + +/* + * factory calibration data for one txpower level, on one channel, + * measured on one of the 2 tx chains (radio transmitter and associated + * antenna). EEPROM contains: + * + * 1) Temperature (degrees Celsius) of device when measurement was made. + * + * 2) Gain table index used to achieve the target measurement power. + * This refers to the "well-known" gain tables (see iwl-4965-hw.h). + * + * 3) Actual measured output power, in half-dBm ("34" = 17 dBm). + * + * 4) RF power amplifier detector level measurement (not used). + */ +struct iwl_eeprom_calib_measure { + u8 temperature; /* Device temperature (Celsius) */ + u8 gain_idx; /* Index into gain table */ + u8 actual_pow; /* Measured RF output power, half-dBm */ + s8 pa_det; /* Power amp detector level (not used) */ +} __packed; + + +/* + * measurement set for one channel. EEPROM contains: + * + * 1) Channel number measured + * + * 2) Measurements for each of 3 power levels for each of 2 radio transmitters + * (a.k.a. "tx chains") (6 measurements altogether) + */ +struct iwl_eeprom_calib_ch_info { + u8 ch_num; + struct iwl_eeprom_calib_measure + measurements[EEPROM_TX_POWER_TX_CHAINS] + [EEPROM_TX_POWER_MEASUREMENTS]; +} __packed; + +/* + * txpower subband info. + * + * For each frequency subband, EEPROM contains the following: + * + * 1) First and last channels within range of the subband. "0" values + * indicate that this sample set is not being used. + * + * 2) Sample measurement sets for 2 channels close to the range endpoints. + */ +struct iwl_eeprom_calib_subband_info { + u8 ch_from; /* channel number of lowest channel in subband */ + u8 ch_to; /* channel number of highest channel in subband */ + struct iwl_eeprom_calib_ch_info ch1; + struct iwl_eeprom_calib_ch_info ch2; +} __packed; + + +/* + * txpower calibration info. EEPROM contains: + * + * 1) Factory-measured saturation power levels (maximum levels at which + * tx power amplifier can output a signal without too much distortion). + * There is one level for 2.4 GHz band and one for 5 GHz band. These + * values apply to all channels within each of the bands. + * + * 2) Factory-measured power supply voltage level. This is assumed to be + * constant (i.e. same value applies to all channels/bands) while the + * factory measurements are being made. + * + * 3) Up to 8 sets of factory-measured txpower calibration values. 
+ * These are for different frequency ranges, since txpower gain + * characteristics of the analog radio circuitry vary with frequency. + * + * Not all sets need to be filled with data; + * struct iwl_eeprom_calib_subband_info contains range of channels + * (0 if unused) for each set of data. + */ +struct iwl_eeprom_calib_info { + u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */ + u8 saturation_power52; /* half-dBm */ + __le16 voltage; /* signed */ + struct iwl_eeprom_calib_subband_info + band_info[EEPROM_TX_POWER_BANDS]; +} __packed; + + +/* General */ +#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */ +#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */ +#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ +#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ +#define EEPROM_VERSION (2*0x44) /* 2 bytes */ +#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */ +#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ +#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ +#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ +#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ + +/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ +#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ +#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */ +#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */ +#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */ +#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ +#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ + +#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 +#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 + +/* + * Per-channel regulatory data. + * + * Each channel that *might* be supported by iwl has a fixed location + * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory + * txpower (MSB). + * + * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz) + * channels (only for 4965, not supported by 3945) appear later in the EEPROM. + * + * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + */ +#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */ +#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */ + +/* + * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196, + * 5.0 GHz channels 7, 8, 11, 12, 16 + * (4915-5080MHz) (none of these is ever supported) + */ +#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */ + +/* + * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 + * (5170-5320MHz) + */ +#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */ + +/* + * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 + * (5500-5700MHz) + */ +#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */ + +/* + * 5.7 GHz channels 145, 149, 153, 157, 161, 165 + * (5725-5825MHz) + */ +#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */ +#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */ + +/* + * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11) + * + * The channel listed is the center of the lower 20 MHz half of the channel. 
+ * The overall center frequency is actually 2 channels (10 MHz) above that, + * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away + * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5, + * and the overall HT40 channel width centers on channel 3. + * + * NOTE: The RXON command uses 20 MHz channel numbers to specify the + * control channel to which to tune. RXON also specifies whether the + * control channel is the upper or lower half of a HT40 channel. + * + * NOTE: 4965 does not support HT40 channels on 2.4 GHz. + */ +#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */ + +/* + * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64), + * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161) + */ +#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */ + +#define EEPROM_REGULATORY_BAND_NO_HT40 (0) + +struct iwl_eeprom_ops { + const u32 regulatory_bands[7]; + int (*acquire_semaphore) (struct iwl_priv *priv); + void (*release_semaphore) (struct iwl_priv *priv); +}; + + +int iwl_legacy_eeprom_init(struct iwl_priv *priv); +void iwl_legacy_eeprom_free(struct iwl_priv *priv); +const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, + size_t offset); +u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset); +int iwl_legacy_init_channel_map(struct iwl_priv *priv); +void iwl_legacy_free_channel_map(struct iwl_priv *priv); +const struct iwl_channel_info *iwl_legacy_get_channel_info( + const struct iwl_priv *priv, + enum ieee80211_band band, u16 channel); + +#endif /* __iwl_legacy_eeprom_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h new file mode 100644 index 000000000000..4e20c7e5c883 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-fh.h @@ -0,0 +1,513 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_legacy_fh_h__ +#define __iwl_legacy_fh_h__ + +/****************************/ +/* Flow Handler Definitions */ +/****************************/ + +/** + * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) + * Addresses are offsets from device's PCI hardware base address. + */ +#define FH_MEM_LOWER_BOUND (0x1000) +#define FH_MEM_UPPER_BOUND (0x2000) + +/** + * Keep-Warm (KW) buffer base address. + * + * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the + * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency + * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host + * from going into a power-savings mode that would cause higher DRAM latency, + * and possible data over/under-runs, before all Tx/Rx is complete. + * + * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4) + * of the buffer, which must be 4K aligned. Once this is set up, the 4965 + * automatically invokes keep-warm accesses when normal accesses might not + * be sufficient to maintain fast DRAM response. + * + * Bit fields: + * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned + */ +#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) + + +/** + * TFD Circular Buffers Base (CBBC) addresses + * + * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident + * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs) + * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 + * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte + * aligned (address bits 0-7 must be 0). 
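+ *
+ * As an illustrative sketch only (not part of this header; the driver's Tx
+ * queue setup code does the real programming, and the txq/q field names are
+ * assumptions here), the base of one TFD circular buffer could be handed to
+ * the device with FH_MEM_CBBC_QUEUE() below and the direct-write helper from
+ * iwl-io.h; the register holds the DMA base address in 256-byte units:
+ *
+ *	iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+ *				  txq->q.dma_addr >> 8);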
+ * + * Bit fields in each pointer register: + * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned + */ +#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) + +/* Find TFD CB base pointer for given queue (range 0-15). */ +#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) + + +/** + * Rx SRAM Control and Status Registers (RSCSR) + * + * These registers provide handshake between driver and 4965 for the Rx queue + * (this queue handles *all* command responses, notifications, Rx data, etc. + * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx + * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can + * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer + * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1 + * mapping between RBDs and RBs. + * + * Driver must allocate host DRAM memory for the following, and set the + * physical address of each into 4965 registers: + * + * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256 + * entries (although any power of 2, up to 4096, is selectable by driver). + * Each entry (1 dword) points to a receive buffer (RB) of consistent size + * (typically 4K, although 8K or 16K are also selectable by driver). + * Driver sets up RB size and number of RBDs in the CB via Rx config + * register FH_MEM_RCSR_CHNL0_CONFIG_REG. + * + * Bit fields within one RBD: + * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned + * + * Driver sets physical address [35:8] of base of RBD circular buffer + * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0]. + * + * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers + * (RBs) have been filled, via a "write pointer", actually the index of + * the RB's corresponding RBD within the circular buffer. Driver sets + * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0]. + * + * Bit fields in lower dword of Rx status buffer (upper dword not used + * by driver; see struct iwl4965_shared, val0): + * 31-12: Not used by driver + * 11- 0: Index of last filled Rx buffer descriptor + * (4965 writes, driver reads this value) + * + * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must + * enter pointers to these RBs into contiguous RBD circular buffer entries, + * and update the 4965's "write" index register, + * FH_RSCSR_CHNL0_RBDCB_WPTR_REG. + * + * This "write" index corresponds to the *next* RBD that the driver will make + * available, i.e. one RBD past the tail of the ready-to-fill RBDs within + * the circular buffer. This value should initially be 0 (before preparing any + * RBs), should be 8 after preparing the first 8 RBs (for example), and must + * wrap back to 0 at the end of the circular buffer (but don't wrap before + * "read" index has advanced past 1! See below). + * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8. + * + * As the 4965 fills RBs (referenced from contiguous RBDs within the circular + * buffer), it updates the Rx status buffer in host DRAM, 2) described above, + * to tell the driver the index of the latest filled RBD. The driver must + * read this "read" index from DRAM after receiving an Rx interrupt from 4965. + * + * The driver must also internally keep track of a third index, which is the + * next RBD to process. 
When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" index. For example, if "read" index becomes "1",
+ * driver may process the RB pointed to by RBD 0. Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read index == write index, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" indexes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
+#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (index, really!).
+ * Bit fields:
+ * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ * NOTE: For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
+#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
+
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
+ * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ * min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ * '10' 12K, '11' 16K.
+ * 15-14: reserved + * 13-12: IRQ destination; '00' none, '01' host driver (normal operation) + * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec) + * typical value 0x10 (about 1/2 msec) + * 3- 0: reserved + */ +#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0) +#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND) + +#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0) + +#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */ +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bits 12 */ +#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */ +#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */ +#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/ + +#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20) +#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4) +#define RX_RB_TIMEOUT (0x10) + +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000) +#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000) + +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000) +#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000) + +#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) +#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) + +#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */ + +/** + * Rx Shared Status Registers (RSSR) + * + * After stopping Rx DMA channel (writing 0 to + * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll + * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle. + * + * Bit fields: + * 24: 1 = Channel 0 is idle + * + * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV + * contain default values that should not be altered by the driver. + */ +#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40) +#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) + +#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND) +#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004) +#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\ + (FH_MEM_RSSR_LOWER_BOUND + 0x008) + +#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000) + +#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 + +/* TFDB Area - TFDs buffer table */ +#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) +#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) +#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958) +#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) +#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) + +/** + * Transmit DMA Channel Control/Status Registers (TCSR) + * + * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels + * supported in hardware (don't confuse these with the 16 Tx queues in DRAM, + * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes. + * + * To use a Tx DMA channel, driver must initialize its + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with: + * + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL + * + * All other bits should be 0. 
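+ *
+ * For example, a minimal sketch of that write (illustrative only; the
+ * driver's Tx DMA setup code performs the real initialization), using the
+ * direct-write helper from iwl-io.h and the values defined below:
+ *
+ *	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE |
+ *		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);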
+ * + * Bit fields: + * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame, + * '10' operate normally + * 29- 4: Reserved, set to "0" + * 3: Enable internal DMA requests (1, normal operation), disable (0) + * 2- 0: Reserved, set to "0" + */ +#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00) +#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60) + +/* Find Control/Status reg for given Tx DMA/FIFO channel */ +#define FH49_TCSR_CHNL_NUM (7) +#define FH50_TCSR_CHNL_NUM (8) + +/* TCSR: tx_config register values */ +#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl)) +#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4) +#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \ + (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8) + +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000) +#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000) + +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000) +#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003) + +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) +#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) + +/** + * Tx Shared Status Registers (TSSR) + * + * After stopping Tx DMA channel (writing 0 to + * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll + * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle + * (channel's buffers empty | no pending requests). + * + * Bit fields: + * 31-24: 1 = Channel buffers empty (channel 7:0) + * 23-16: 1 = No pending requests (channel 7:0) + */ +#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0) +#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0) + +#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) + +/** + * Bit fields for TSSR(Tx Shared Status & Control) error status register: + * 31: Indicates an address error when accessed to internal memory + * uCode/driver must write "1" in order to clear this flag + * 30: Indicates that Host did not send the expected number of dwords to FH + * uCode/driver must write "1" in order to clear this flag + * 16-9:Each status bit is for one channel. Indicates that an (Error) ActDMA + * command was received from the scheduler while the TRB was already full + * with previous command + * uCode/driver must write "1" in order to clear this flag + * 7-0: Each status bit indicates a channel's TxCredit error. When an error + * bit is set, it indicates that the FH has received a full indication + * from the RTC TxFIFO and the current value of the TxCredit counter was + * not equal to zero. 
This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+
+#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
+
+/* Tx service channels */
+#define FH_SRVC_CHNL (9)
+#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
+#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+ (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from the memory to TX-FIFO
+ */
+#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+#define RX_QUEUE_SIZE 256
+#define RX_QUEUE_MASK 255
+#define RX_QUEUE_SIZE_LOG 8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+/* Size of one Rx buffer in host DRAM */
+#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IWL_RX_BUF_SIZE_4K (4 * 1024)
+#define IWL_RX_BUF_SIZE_8K (8 * 1024)
+
+/**
+ * struct iwl_rb_status - reserve buffer status
+ * host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * which was transferred
+ */
+struct iwl_rb_status {
+ __le16 closed_rb_num;
+ __le16 closed_fr_num;
+ __le16 finished_rb_num;
+ __le16 finished_fr_nam;
+ __le32 __unused; /* 3945 only */
+} __packed;
+
+
+#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_BC_DUP (64)
+#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IWL_NUM_OF_TBS 20
+
+static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
+{
+ return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
+/**
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the DMA address and length of one Tx buffer
+ *
+ * @lo: low [31:0] portion of the dma address of TX buffer
+ * every even is unaligned on 16 bit boundary
+ * @hi_n_len 0-3 [35:32] portion of dma
+ * 4-15 length of the tx buffer
+ */
+struct iwl_tfd_tb {
+ __le32 lo;
+ __le16 hi_n_len;
+} __packed;
+
+/**
+ * struct iwl_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4).
The concatenates all of a TFD's buffers into a single + * Tx frame, up to 8 KBytes in size. + * + * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx. + */ +struct iwl_tfd { + u8 __reserved1[3]; + u8 num_tbs; + struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS]; + __le32 __pad; +} __packed; + +/* Keep Warm Size */ +#define IWL_KW_SIZE 0x1000 /* 4k */ + +#endif /* !__iwl_legacy_fh_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c new file mode 100644 index 000000000000..9d721cbda5bb --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c @@ -0,0 +1,271 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <net/mac80211.h> + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-eeprom.h" +#include "iwl-core.h" + + +const char *iwl_legacy_get_cmd_string(u8 cmd) +{ + switch (cmd) { + IWL_CMD(REPLY_ALIVE); + IWL_CMD(REPLY_ERROR); + IWL_CMD(REPLY_RXON); + IWL_CMD(REPLY_RXON_ASSOC); + IWL_CMD(REPLY_QOS_PARAM); + IWL_CMD(REPLY_RXON_TIMING); + IWL_CMD(REPLY_ADD_STA); + IWL_CMD(REPLY_REMOVE_STA); + IWL_CMD(REPLY_WEPKEY); + IWL_CMD(REPLY_3945_RX); + IWL_CMD(REPLY_TX); + IWL_CMD(REPLY_RATE_SCALE); + IWL_CMD(REPLY_LEDS_CMD); + IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); + IWL_CMD(REPLY_CHANNEL_SWITCH); + IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); + IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); + IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); + IWL_CMD(POWER_TABLE_CMD); + IWL_CMD(PM_SLEEP_NOTIFICATION); + IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); + IWL_CMD(REPLY_SCAN_CMD); + IWL_CMD(REPLY_SCAN_ABORT_CMD); + IWL_CMD(SCAN_START_NOTIFICATION); + IWL_CMD(SCAN_RESULTS_NOTIFICATION); + IWL_CMD(SCAN_COMPLETE_NOTIFICATION); + IWL_CMD(BEACON_NOTIFICATION); + IWL_CMD(REPLY_TX_BEACON); + IWL_CMD(REPLY_TX_PWR_TABLE_CMD); + IWL_CMD(REPLY_BT_CONFIG); + IWL_CMD(REPLY_STATISTICS_CMD); + IWL_CMD(STATISTICS_NOTIFICATION); + IWL_CMD(CARD_STATE_NOTIFICATION); + IWL_CMD(MISSED_BEACONS_NOTIFICATION); + IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); + IWL_CMD(SENSITIVITY_CMD); + IWL_CMD(REPLY_PHY_CALIBRATION_CMD); + IWL_CMD(REPLY_RX_PHY_CMD); + IWL_CMD(REPLY_RX_MPDU_CMD); + IWL_CMD(REPLY_RX); + IWL_CMD(REPLY_COMPRESSED_BA); + default: + return "UNKNOWN"; + + } +} +EXPORT_SYMBOL(iwl_legacy_get_cmd_string); + +#define HOST_COMPLETE_TIMEOUT (HZ / 2) + +static void 
iwl_legacy_generic_cmd_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + return; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + switch (cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + break; + default: + IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); + } +#endif +} + +static int +iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int ret; + + BUG_ON(!(cmd->flags & CMD_ASYNC)); + + /* An asynchronous command can not expect an SKB to be set. */ + BUG_ON(cmd->flags & CMD_WANT_SKB); + + /* Assign a generic callback if one is not provided */ + if (!cmd->callback) + cmd->callback = iwl_legacy_generic_cmd_callback; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EBUSY; + + ret = iwl_legacy_enqueue_hcmd(priv, cmd); + if (ret < 0) { + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_legacy_get_cmd_string(cmd->id), ret); + return ret; + } + return 0; +} + +int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + int cmd_idx; + int ret; + + BUG_ON(cmd->flags & CMD_ASYNC); + + /* A synchronous command can not have a callback set. */ + BUG_ON(cmd->callback); + + IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + mutex_lock(&priv->sync_cmd_mutex); + + set_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + + cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd); + if (cmd_idx < 0) { + ret = cmd_idx; + IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_legacy_get_cmd_string(cmd->id), ret); + goto out; + } + + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + !test_bit(STATUS_HCMD_ACTIVE, &priv->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { + IWL_ERR(priv, + "Error sending %s: time out after %dms.\n", + iwl_legacy_get_cmd_string(cmd->id), + jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, + "Clearing HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -ETIMEDOUT; + goto cancel; + } + } + + if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { + IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -ECANCELED; + goto fail; + } + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_ERR(priv, "Command %s failed: FW Error\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -EIO; + goto fail; + } + if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { + IWL_ERR(priv, "Error: Response NULL in '%s'\n", + iwl_legacy_get_cmd_string(cmd->id)); + ret = -EIO; + goto cancel; + } + + ret = 0; + goto out; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). 
+ */ + priv->txq[priv->cmd_queue].meta[cmd_idx].flags &= + ~CMD_WANT_SKB; + } +fail: + if (cmd->reply_page) { + iwl_legacy_free_pages(priv, cmd->reply_page); + cmd->reply_page = 0; + } +out: + mutex_unlock(&priv->sync_cmd_mutex); + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_sync); + +int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + if (cmd->flags & CMD_ASYNC) + return iwl_legacy_send_cmd_async(priv, cmd); + + return iwl_legacy_send_cmd_sync(priv, cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd); + +int +iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + return iwl_legacy_send_cmd_sync(priv, &cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu); + +int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, + u8 id, u16 len, const void *data, + void (*callback)(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt)) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = len, + .data = data, + }; + + cmd.flags |= CMD_ASYNC; + cmd.callback = callback; + + return iwl_legacy_send_cmd_async(priv, &cmd); +} +EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async); diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h new file mode 100644 index 000000000000..02132e755831 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-helpers.h @@ -0,0 +1,181 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_helpers_h__ +#define __iwl_legacy_helpers_h__ + +#include <linux/ctype.h> +#include <net/mac80211.h> + +#include "iwl-io.h" + +#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) + + +static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf( + struct ieee80211_hw *hw) +{ + return &hw->conf; +} + +/** + * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning + * @index -- current index + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd) +{ + return ++index & (n_bd - 1); +} + +/** + * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end + * @index -- current index + * @n_bd -- total number of entries in queue (must be power of 2) + */ +static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd) +{ + return --index & (n_bd - 1); +} + +/* TODO: Move fw_desc functions to iwl-pci.ko */ +static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev, + struct fw_desc *desc) +{ + if (desc->v_addr) + dma_free_coherent(&pci_dev->dev, desc->len, + desc->v_addr, desc->p_addr); + desc->v_addr = NULL; + desc->len = 0; +} + +static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev, + struct fw_desc *desc) +{ + if (!desc->len) { + desc->v_addr = NULL; + return -EINVAL; + } + + desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len, + &desc->p_addr, GFP_KERNEL); + return (desc->v_addr != NULL) ? 0 : -ENOMEM; +} + +/* + * we have 8 bits used like this: + * + * 7 6 5 4 3 2 1 0 + * | | | | | | | | + * | | | | | | +-+-------- AC queue (0-3) + * | | | | | | + * | +-+-+-+-+------------ HW queue ID + * | + * +---------------------- unused + */ +static inline void +iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq) +{ + BUG_ON(ac > 3); /* only have 2 bits */ + BUG_ON(hwq > 31); /* only use 5 bits */ + + txq->swq_id = (hwq << 2) | ac; +} + +static inline void iwl_legacy_wake_queue(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u8 queue = txq->swq_id; + u8 ac = queue & 3; + u8 hwq = (queue >> 2) & 0x1f; + + if (test_and_clear_bit(hwq, priv->queue_stopped)) + if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0) + ieee80211_wake_queue(priv->hw, ac); +} + +static inline void iwl_legacy_stop_queue(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + u8 queue = txq->swq_id; + u8 ac = queue & 3; + u8 hwq = (queue >> 2) & 0x1f; + + if (!test_and_set_bit(hwq, priv->queue_stopped)) + if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0) + ieee80211_stop_queue(priv->hw, ac); +} + +#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue +#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue + +static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv) +{ + clear_bit(STATUS_INT_ENABLED, &priv->status); + + /* disable interrupts from uCode/NIC to host */ + iwl_write32(priv, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(priv, CSR_INT, 0xffffffff); + iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); + IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); +} + +static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); + set_bit(STATUS_INT_ENABLED, &priv->status); + iwl_write32(priv, CSR_INT_MASK, 
priv->inta_mask); +} + +/** + * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv, + u16 tsf_bits) +{ + return (1 << tsf_bits) - 1; +} + +/** + * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time + * @priv -- pointer to iwl_priv data structure + * @tsf_bits -- number of bits need to shift for masking) + */ +static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv, + u16 tsf_bits) +{ + return ((1 << (32 - tsf_bits)) - 1) << tsf_bits; +} + +#endif /* __iwl_legacy_helpers_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h new file mode 100644 index 000000000000..5cc5d342914f --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-io.h @@ -0,0 +1,545 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_io_h__ +#define __iwl_legacy_io_h__ + +#include <linux/io.h> + +#include "iwl-dev.h" +#include "iwl-debug.h" +#include "iwl-devtrace.h" + +/* + * IO, register, and NIC memory access functions + * + * NOTE on naming convention and macro usage for these + * + * A single _ prefix before a an access function means that no state + * check or debug information is printed when that function is called. + * + * A double __ prefix before an access function means that state is checked + * and the current line number and caller function name are printed in addition + * to any other debug output. + * + * The non-prefixed name is the #define that maps the caller into a + * #define that provides the caller's name and __LINE__ to the double + * prefix version. + * + * If you wish to call the function without any debug or state checking, + * you should use the single _ prefix version (as is used by dependent IO + * routines, for example _iwl_legacy_read_direct32 calls the non-check version of + * _iwl_legacy_read32.) + * + * These declarations are *extremely* useful in quickly isolating code deltas + * which result in misconfiguration of the hardware I/O. In combination with + * git-bisect and the IO debug level you can quickly determine the specific + * commit which breaks the IO sequence to the hardware. 
+ * + */ + +static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val) +{ + trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val); + iowrite8(val, priv->hw_base + ofs); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv, + u32 ofs, u8 val) +{ + IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l); + _iwl_legacy_write8(priv, ofs, val); +} +#define iwl_write8(priv, ofs, val) \ + __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val) +#else +#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val) +#endif + + +static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val) +{ + trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val); + iowrite32(val, priv->hw_base + ofs); +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv, + u32 ofs, u32 val) +{ + IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l); + _iwl_legacy_write32(priv, ofs, val); +} +#define iwl_write32(priv, ofs, val) \ + __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val) +#else +#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val) +#endif + +static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs) +{ + u32 val = ioread32(priv->hw_base + ofs); + trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val); + return val; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline u32 +__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) +{ + IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l); + return _iwl_legacy_read32(priv, ofs); +} +#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs) +#else +#define iwl_read32(p, o) _iwl_legacy_read32(p, o) +#endif + +#define IWL_POLL_INTERVAL 10 /* microseconds */ +static inline int +_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int t = 0; + + do { + if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask)) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_poll_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 addr, + u32 bits, u32 mask, int timeout) +{ + int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout); + IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n", + addr, bits, mask, + unlikely(ret == -ETIMEDOUT) ? 
"timeout" : "", f, l); + return ret; +} +#define iwl_poll_bit(priv, addr, bits, mask, timeout) \ + __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \ + bits, mask, timeout) +#else +#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t) +#endif + +static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void __iwl_legacy_set_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_legacy_read32(priv, reg) | mask; + IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, + mask, val); + _iwl_legacy_write32(priv, reg, val); +} +static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#else +static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _iwl_legacy_set_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#endif + +static inline void +_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) +{ + _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void +__iwl_legacy_clear_bit(const char *f, u32 l, + struct iwl_priv *priv, u32 reg, u32 mask) +{ + u32 val = _iwl_legacy_read32(priv, reg) & ~mask; + IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val); + _iwl_legacy_write32(priv, reg, val); +} +static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#else +static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&p->reg_lock, reg_flags); + _iwl_legacy_clear_bit(p, r, m); + spin_unlock_irqrestore(&p->reg_lock, reg_flags); +} +#endif + +static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv) +{ + int ret; + u32 val; + + /* this bit wakes up the NIC */ + _iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* + * These bits say the device is running, and should keep running for + * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), + * but they do not indicate that embedded SRAM is restored yet; + * 3945 and 4965 have volatile SRAM, and must save/restore contents + * to/from host DRAM when sleeping/waking for power-saving. + * Each direction takes approximately 1/4 millisecond; with this + * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a + * series of register accesses are expected (e.g. reading Event Log), + * to keep device from sleeping. + * + * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that + * SRAM is okay/restored. We don't check that here because this call + * is just for hardware register access; but GP1 MAC_SLEEP check is a + * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). 
+ * + */ + ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); + if (ret < 0) { + val = _iwl_legacy_read32(priv, CSR_GP_CNTRL); + IWL_ERR(priv, + "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val); + _iwl_legacy_write32(priv, CSR_RESET, + CSR_RESET_REG_FLAG_FORCE_NMI); + return -EIO; + } + + return 0; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l); + return _iwl_legacy_grab_nic_access(priv); +} +#define iwl_grab_nic_access(priv) \ + __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv) +#else +#define iwl_grab_nic_access(priv) \ + _iwl_legacy_grab_nic_access(priv) +#endif + +static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv) +{ + _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline void __iwl_legacy_release_nic_access(const char *f, u32 l, + struct iwl_priv *priv) +{ + + IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l); + _iwl_legacy_release_nic_access(priv); +} +#define iwl_release_nic_access(priv) \ + __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv) +#else +#define iwl_release_nic_access(priv) \ + _iwl_legacy_release_nic_access(priv) +#endif + +static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + return _iwl_legacy_read32(priv, reg); +} +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l, + struct iwl_priv *priv, u32 reg) +{ + u32 value = _iwl_legacy_read_direct32(priv, reg); + IWL_DEBUG_IO(priv, + "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value, + f, l); + return value; +} +static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; +} + +#else +static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg) +{ + u32 value; + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + value = _iwl_legacy_read_direct32(priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; + +} +#endif + +static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv, + u32 reg, u32 value) +{ + _iwl_legacy_write32(priv, reg, value); +} +static inline void +iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, reg, value); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv, + u32 reg, u32 len, u32 *values) +{ + u32 count = sizeof(u32); + + if ((priv != NULL) && (values != NULL)) { + for (; 0 < len; len -= count, reg += count, values++) + iwl_legacy_write_direct32(priv, reg, *values); + } +} + +static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr, + u32 mask, int timeout) +{ + int t = 0; + + do { + if 
((iwl_legacy_read_direct32(priv, addr) & mask) == mask) + return t; + udelay(IWL_POLL_INTERVAL); + t += IWL_POLL_INTERVAL; + } while (t < timeout); + + return -ETIMEDOUT; +} + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l, + struct iwl_priv *priv, + u32 addr, u32 mask, int timeout) +{ + int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout); + + if (unlikely(ret == -ETIMEDOUT)) + IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - " + "timedout - %s %d\n", addr, mask, f, l); + else + IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X " + "- %s %d\n", addr, mask, ret, f, l); + return ret; +} +#define iwl_poll_direct_bit(priv, addr, mask, timeout) \ +__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout) +#else +#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit +#endif + +static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg) +{ + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); + rmb(); + return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT); +} +static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + val = _iwl_legacy_read_prph(priv, reg); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return val; +} + +static inline void _iwl_legacy_write_prph(struct iwl_priv *priv, + u32 addr, u32 val) +{ + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR, + ((addr & 0x0000FFFF) | (3 << 24))); + wmb(); + _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val); +} + +static inline void +iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_prph(priv, addr, val); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +#define _iwl_legacy_set_bits_prph(priv, reg, mask) \ +_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask)) + +static inline void +iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + _iwl_legacy_set_bits_prph(priv, reg, mask); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \ +_iwl_legacy_write_prph(priv, reg, \ + ((_iwl_legacy_read_prph(priv, reg) & mask) | bits)) + +static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, + u32 bits, u32 mask) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void iwl_legacy_clear_bits_prph(struct iwl_priv + *priv, u32 reg, u32 mask) +{ + unsigned long reg_flags; + u32 val; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + val = _iwl_legacy_read_prph(priv, reg); + _iwl_legacy_write_prph(priv, reg, (val & ~mask)); + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr) +{ + unsigned long reg_flags; + u32 
value; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr); + rmb(); + value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return value; +} + +static inline void +iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); + wmb(); + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val); + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static inline void +iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr, + u32 len, u32 *values) +{ + unsigned long reg_flags; + + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (!iwl_grab_nic_access(priv)) { + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr); + wmb(); + for (; 0 < len; len -= sizeof(u32), values++) + _iwl_legacy_write_direct32(priv, + HBUS_TARG_MEM_WDAT, *values); + + iwl_release_nic_access(priv); + } + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c new file mode 100644 index 000000000000..15eb8b707157 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-led.c @@ -0,0 +1,188 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" + +/* default: IWL_LED_BLINK(0) using blinking index table */ +static int led_mode; +module_param(led_mode, int, S_IRUGO); +MODULE_PARM_DESC(led_mode, "0=system default, " + "1=On(RF On)/Off(RF Off), 2=blinking"); + +static const struct ieee80211_tpt_blink iwl_blink[] = { + { .throughput = 0 * 1024 - 1, .blink_time = 334 }, + { .throughput = 1 * 1024 - 1, .blink_time = 260 }, + { .throughput = 5 * 1024 - 1, .blink_time = 220 }, + { .throughput = 10 * 1024 - 1, .blink_time = 190 }, + { .throughput = 20 * 1024 - 1, .blink_time = 170 }, + { .throughput = 50 * 1024 - 1, .blink_time = 150 }, + { .throughput = 70 * 1024 - 1, .blink_time = 130 }, + { .throughput = 100 * 1024 - 1, .blink_time = 110 }, + { .throughput = 200 * 1024 - 1, .blink_time = 80 }, + { .throughput = 300 * 1024 - 1, .blink_time = 50 }, +}; + +/* + * Adjust led blink rate to compensate on a MAC Clock difference on every HW + * Led blink rate analysis showed an average deviation of 0% on 3945, + * 5% on 4965 HW. + * Need to compensate on the led on/off time per HW according to the deviation + * to achieve the desired led frequency + * The calculation is: (100-averageDeviation)/100 * blinkTime + * For code efficiency the calculation will be: + * compensation = (100 - averageDeviation) * 64 / 100 + * NewBlinkTime = (compensation * BlinkTime) / 64 + */ +static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv, + u8 time, u16 compensation) +{ + if (!compensation) { + IWL_ERR(priv, "undefined blink compensation: " + "use pre-defined blinking time\n"); + return time; + } + + return (u8)((time * compensation) >> 6); +} + +/* Set led pattern command */ +static int iwl_legacy_led_cmd(struct iwl_priv *priv, + unsigned long on, + unsigned long off) +{ + struct iwl_led_cmd led_cmd = { + .id = IWL_LED_LINK, + .interval = IWL_DEF_LED_INTRVL + }; + int ret; + + if (!test_bit(STATUS_READY, &priv->status)) + return -EBUSY; + + if (priv->blink_on == on && priv->blink_off == off) + return 0; + + IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", + priv->cfg->base_params->led_compensation); + led_cmd.on = iwl_legacy_blink_compensation(priv, on, + priv->cfg->base_params->led_compensation); + led_cmd.off = iwl_legacy_blink_compensation(priv, off, + priv->cfg->base_params->led_compensation); + + ret = priv->cfg->ops->led->cmd(priv, &led_cmd); + if (!ret) { + priv->blink_on = on; + priv->blink_off = off; + } + return ret; +} + +static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + unsigned long on = 0; + + if (brightness > 0) + on = IWL_LED_SOLID; + + iwl_legacy_led_cmd(priv, on, 0); +} + +static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + + return iwl_legacy_led_cmd(priv, *delay_on, *delay_off); +} + +void iwl_legacy_leds_init(struct 
iwl_priv *priv) +{ + int mode = led_mode; + int ret; + + if (mode == IWL_LED_DEFAULT) + mode = priv->cfg->led_mode; + + priv->led.name = kasprintf(GFP_KERNEL, "%s-led", + wiphy_name(priv->hw->wiphy)); + priv->led.brightness_set = iwl_legacy_led_brightness_set; + priv->led.blink_set = iwl_legacy_led_blink_set; + priv->led.max_brightness = 1; + + switch (mode) { + case IWL_LED_DEFAULT: + WARN_ON(1); + break; + case IWL_LED_BLINK: + priv->led.default_trigger = + ieee80211_create_tpt_led_trigger(priv->hw, + IEEE80211_TPT_LEDTRIG_FL_CONNECTED, + iwl_blink, ARRAY_SIZE(iwl_blink)); + break; + case IWL_LED_RF_STATE: + priv->led.default_trigger = + ieee80211_get_radio_led_name(priv->hw); + break; + } + + ret = led_classdev_register(&priv->pci_dev->dev, &priv->led); + if (ret) { + kfree(priv->led.name); + return; + } + + priv->led_registered = true; +} +EXPORT_SYMBOL(iwl_legacy_leds_init); + +void iwl_legacy_leds_exit(struct iwl_priv *priv) +{ + if (!priv->led_registered) + return; + + led_classdev_unregister(&priv->led); + kfree(priv->led.name); +} +EXPORT_SYMBOL(iwl_legacy_leds_exit); diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h new file mode 100644 index 000000000000..f0791f70f79d --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-led.h @@ -0,0 +1,56 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_leds_h__ +#define __iwl_legacy_leds_h__ + + +struct iwl_priv; + +#define IWL_LED_SOLID 11 +#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) + +#define IWL_LED_ACTIVITY (0<<1) +#define IWL_LED_LINK (1<<1) + +/* + * LED mode + * IWL_LED_DEFAULT: use device default + * IWL_LED_RF_STATE: turn LED on/off based on RF state + * LED ON = RF ON + * LED OFF = RF OFF + * IWL_LED_BLINK: adjust led blink rate based on blink table + */ +enum iwl_led_mode { + IWL_LED_DEFAULT, + IWL_LED_RF_STATE, + IWL_LED_BLINK, +}; + +void iwl_legacy_leds_init(struct iwl_priv *priv); +void iwl_legacy_leds_exit(struct iwl_priv *priv); + +#endif /* __iwl_legacy_leds_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h new file mode 100644 index 000000000000..38647e481eb0 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h @@ -0,0 +1,456 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_rs_h__ +#define __iwl_legacy_rs_h__ + +struct iwl_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ + u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ +}; + +struct iwl3945_rate_info { + u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ + u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ + u8 prev_ieee; /* previous rate in IEEE speeds */ + u8 next_ieee; /* next rate in IEEE speeds */ + u8 prev_rs; /* previous rate used in rs algo */ + u8 next_rs; /* next rate used in rs algo */ + u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ + u8 next_rs_tgg; /* next rate used in TGG rs algo */ + u8 table_rs_index; /* index in rate scale table cmd */ + u8 prev_table_rs; /* prev in rate table cmd */ +}; + + +/* + * These serve as indexes into + * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT]; + */ +enum { + IWL_RATE_1M_INDEX = 0, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_COUNT, + IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */ + IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1, + IWL_RATE_INVM_INDEX = IWL_RATE_COUNT, + IWL_RATE_INVALID = IWL_RATE_COUNT, +}; + +enum { + IWL_RATE_6M_INDEX_TABLE = 0, + IWL_RATE_9M_INDEX_TABLE, + IWL_RATE_12M_INDEX_TABLE, + IWL_RATE_18M_INDEX_TABLE, + IWL_RATE_24M_INDEX_TABLE, + IWL_RATE_36M_INDEX_TABLE, + IWL_RATE_48M_INDEX_TABLE, + IWL_RATE_54M_INDEX_TABLE, + IWL_RATE_1M_INDEX_TABLE, + IWL_RATE_2M_INDEX_TABLE, + IWL_RATE_5M_INDEX_TABLE, + IWL_RATE_11M_INDEX_TABLE, + IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1, +}; + +enum { + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX, + IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, +}; + +/* #define vs. 
enum to keep from defaulting to 'large integer' */ +#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX) +#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX) +#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX) +#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX) +#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX) +#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX) +#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX) +#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX) +#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX) +#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX) +#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX) +#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX) +#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX) + +/* uCode API values for legacy bit rates, both OFDM and CCK */ +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/ + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, + /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/ +}; + +/* uCode API values for OFDM high-throughput (HT) bit rates */ +enum { + IWL_RATE_SISO_6M_PLCP = 0, + IWL_RATE_SISO_12M_PLCP = 1, + IWL_RATE_SISO_18M_PLCP = 2, + IWL_RATE_SISO_24M_PLCP = 3, + IWL_RATE_SISO_36M_PLCP = 4, + IWL_RATE_SISO_48M_PLCP = 5, + IWL_RATE_SISO_54M_PLCP = 6, + IWL_RATE_SISO_60M_PLCP = 7, + IWL_RATE_MIMO2_6M_PLCP = 0x8, + IWL_RATE_MIMO2_12M_PLCP = 0x9, + IWL_RATE_MIMO2_18M_PLCP = 0xa, + IWL_RATE_MIMO2_24M_PLCP = 0xb, + IWL_RATE_MIMO2_36M_PLCP = 0xc, + IWL_RATE_MIMO2_48M_PLCP = 0xd, + IWL_RATE_MIMO2_54M_PLCP = 0xe, + IWL_RATE_MIMO2_60M_PLCP = 0xf, + IWL_RATE_SISO_INVM_PLCP, + IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, +}; + +/* MAC header values for bit rates */ +enum { + IWL_RATE_6M_IEEE = 12, + IWL_RATE_9M_IEEE = 18, + IWL_RATE_12M_IEEE = 24, + IWL_RATE_18M_IEEE = 36, + IWL_RATE_24M_IEEE = 48, + IWL_RATE_36M_IEEE = 72, + IWL_RATE_48M_IEEE = 96, + IWL_RATE_54M_IEEE = 108, + IWL_RATE_60M_IEEE = 120, + IWL_RATE_1M_IEEE = 2, + IWL_RATE_2M_IEEE = 4, + IWL_RATE_5M_IEEE = 11, + IWL_RATE_11M_IEEE = 22, +}; + +#define IWL_CCK_BASIC_RATES_MASK \ + (IWL_RATE_1M_MASK | \ + IWL_RATE_2M_MASK) + +#define IWL_CCK_RATES_MASK \ + (IWL_CCK_BASIC_RATES_MASK | \ + IWL_RATE_5M_MASK | \ + IWL_RATE_11M_MASK) + +#define IWL_OFDM_BASIC_RATES_MASK \ + (IWL_RATE_6M_MASK | \ + IWL_RATE_12M_MASK | \ + IWL_RATE_24M_MASK) + +#define IWL_OFDM_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_RATE_9M_MASK | \ + IWL_RATE_18M_MASK | \ + IWL_RATE_36M_MASK | \ + IWL_RATE_48M_MASK | \ + IWL_RATE_54M_MASK) + +#define IWL_BASIC_RATES_MASK \ + (IWL_OFDM_BASIC_RATES_MASK | \ + IWL_CCK_BASIC_RATES_MASK) + +#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) +#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1) + +#define IWL_INVALID_VALUE -1 + +#define IWL_MIN_RSSI_VAL -100 +#define IWL_MAX_RSSI_VAL 0 + +/* These values specify how many Tx frame attempts before + * searching for a new modulation mode */ +#define IWL_LEGACY_FAILURE_LIMIT 160 +#define IWL_LEGACY_SUCCESS_LIMIT 480 +#define IWL_LEGACY_TABLE_COUNT 160 + +#define IWL_NONE_LEGACY_FAILURE_LIMIT 400 +#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500 +#define IWL_NONE_LEGACY_TABLE_COUNT 1500 + +/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */ +#define IWL_RS_GOOD_RATIO 12800 /* 100% */ +#define IWL_RATE_SCALE_SWITCH 
10880 /* 85% */ +#define IWL_RATE_HIGH_TH 10880 /* 85% */ +#define IWL_RATE_INCREASE_TH 6400 /* 50% */ +#define IWL_RATE_DECREASE_TH 1920 /* 15% */ + +/* possible actions when in legacy mode */ +#define IWL_LEGACY_SWITCH_ANTENNA1 0 +#define IWL_LEGACY_SWITCH_ANTENNA2 1 +#define IWL_LEGACY_SWITCH_SISO 2 +#define IWL_LEGACY_SWITCH_MIMO2_AB 3 +#define IWL_LEGACY_SWITCH_MIMO2_AC 4 +#define IWL_LEGACY_SWITCH_MIMO2_BC 5 + +/* possible actions when in siso mode */ +#define IWL_SISO_SWITCH_ANTENNA1 0 +#define IWL_SISO_SWITCH_ANTENNA2 1 +#define IWL_SISO_SWITCH_MIMO2_AB 2 +#define IWL_SISO_SWITCH_MIMO2_AC 3 +#define IWL_SISO_SWITCH_MIMO2_BC 4 +#define IWL_SISO_SWITCH_GI 5 + +/* possible actions when in mimo mode */ +#define IWL_MIMO2_SWITCH_ANTENNA1 0 +#define IWL_MIMO2_SWITCH_ANTENNA2 1 +#define IWL_MIMO2_SWITCH_SISO_A 2 +#define IWL_MIMO2_SWITCH_SISO_B 3 +#define IWL_MIMO2_SWITCH_SISO_C 4 +#define IWL_MIMO2_SWITCH_GI 5 + +#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI + +#define IWL_ACTION_LIMIT 3 /* # possible actions */ + +#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ + +/* load per tid defines for A-MPDU activation */ +#define IWL_AGG_TPT_THREHOLD 0 +#define IWL_AGG_LOAD_THRESHOLD 10 +#define IWL_AGG_ALL_TID 0xff +#define TID_QUEUE_CELL_SPACING 50 /*mS */ +#define TID_QUEUE_MAX_SIZE 20 +#define TID_ROUND_VALUE 5 /* mS */ +#define TID_MAX_LOAD_COUNT 8 + +#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING) +#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y)) + +extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT]; + +enum iwl_table_type { + LQ_NONE, + LQ_G, /* legacy types */ + LQ_A, + LQ_SISO, /* high-throughput types */ + LQ_MIMO2, + LQ_MAX, +}; + +#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) +#define is_siso(tbl) ((tbl) == LQ_SISO) +#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) +#define is_mimo(tbl) (is_mimo2(tbl)) +#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) +#define is_a_band(tbl) ((tbl) == LQ_A) +#define is_g_and(tbl) ((tbl) == LQ_G) + +#define ANT_NONE 0x0 +#define ANT_A BIT(0) +#define ANT_B BIT(1) +#define ANT_AB (ANT_A | ANT_B) +#define ANT_C BIT(2) +#define ANT_AC (ANT_A | ANT_C) +#define ANT_BC (ANT_B | ANT_C) +#define ANT_ABC (ANT_AB | ANT_C) + +#define IWL_MAX_MCS_DISPLAY_SIZE 12 + +struct iwl_rate_mcs_info { + char mbps[IWL_MAX_MCS_DISPLAY_SIZE]; + char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; +}; + +/** + * struct iwl_rate_scale_data -- tx success history for one rate + */ +struct iwl_rate_scale_data { + u64 data; /* bitmap of successful frames */ + s32 success_counter; /* number of frames successful */ + s32 success_ratio; /* per-cent * 128 */ + s32 counter; /* number of frames attempted */ + s32 average_tpt; /* success ratio * expected throughput */ + unsigned long stamp; +}; + +/** + * struct iwl_scale_tbl_info -- tx params and success history for all rates + * + * There are two of these in struct iwl_lq_sta, + * one for "active", and one for "search". + */ +struct iwl_scale_tbl_info { + enum iwl_table_type lq_type; + u8 ant_type; + u8 is_SGI; /* 1 = short guard interval */ + u8 is_ht40; /* 1 = 40 MHz channel width */ + u8 is_dup; /* 1 = duplicated data streams */ + u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */ + u8 max_search; /* maximun number of tables we can search */ + s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. 
*/ + u32 current_rate; /* rate_n_flags, uCode API format */ + struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ +}; + +struct iwl_traffic_load { + unsigned long time_stamp; /* age of the oldest statistics */ + u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time + * slice */ + u32 total; /* total num of packets during the + * last TID_MAX_TIME_DIFF */ + u8 queue_count; /* number of queues that has + * been used since the last cleanup */ + u8 head; /* start of the circular buffer */ +}; + +/** + * struct iwl_lq_sta -- driver's rate scaling private structure + * + * Pointer to this gets passed back and forth between driver and mac80211. + */ +struct iwl_lq_sta { + u8 active_tbl; /* index of active table, range 0-1 */ + u8 enable_counter; /* indicates HT mode */ + u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */ + u8 search_better_tbl; /* 1: currently trying alternate mode */ + s32 last_tpt; + + /* The following determine when to search for a new mode */ + u32 table_count_limit; + u32 max_failure_limit; /* # failed frames before new search */ + u32 max_success_limit; /* # successful frames before new search */ + u32 table_count; + u32 total_failed; /* total failed frames, any/all rates */ + u32 total_success; /* total successful frames, any/all rates */ + u64 flush_timer; /* time staying in mode before new search */ + + u8 action_counter; /* # mode-switch actions tried */ + u8 is_green; + u8 is_dup; + enum ieee80211_band band; + + /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ + u32 supp_rates; + u16 active_legacy_rate; + u16 active_siso_rate; + u16 active_mimo2_rate; + s8 max_rate_idx; /* Max rate set by user */ + u8 missed_rate_counter; + + struct iwl_link_quality_cmd lq; + struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ + struct iwl_traffic_load load[TID_MAX_LOAD_COUNT]; + u8 tx_agg_tid_en; +#ifdef CONFIG_MAC80211_DEBUGFS + struct dentry *rs_sta_dbgfs_scale_table_file; + struct dentry *rs_sta_dbgfs_stats_table_file; + struct dentry *rs_sta_dbgfs_rate_scale_data_file; + struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; + u32 dbg_fixed_rate; +#endif + struct iwl_priv *drv; + + /* used to be in sta_info */ + int last_txrate_idx; + /* last tx rate_n_flags */ + u32 last_rate_n_flags; + /* packets destined for this STA are aggregated */ + u8 is_agg; +}; + +static inline u8 iwl4965_num_of_ant(u8 mask) +{ + return !!((mask) & ANT_A) + + !!((mask) & ANT_B) + + !!((mask) & ANT_C); +} + +static inline u8 iwl4965_first_antenna(u8 mask) +{ + if (mask & ANT_A) + return ANT_A; + if (mask & ANT_B) + return ANT_B; + return ANT_C; +} + + +/** + * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info + * + * The specific throughput table used is based on the type of network + * the associated with, including A, B, G, and G w/ TGG protection + */ +extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); + +/* Initialize station's rate scaling information after adding station */ +extern void iwl4965_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, u8 sta_id); +extern void iwl3945_rs_rate_init(struct iwl_priv *priv, + struct ieee80211_sta *sta, u8 sta_id); + +/** + * iwl_rate_control_register - Register the rate control algorithm callbacks + * + * Since the rate control algorithm is hardware specific, there is no need + * or reason to place it as a stand alone module. 
The driver can call + * iwl_rate_control_register in order to register the rate control callbacks + * with the mac80211 subsystem. This should be performed prior to calling + * ieee80211_register_hw + * + */ +extern int iwl4965_rate_control_register(void); +extern int iwl3945_rate_control_register(void); + +/** + * iwl_rate_control_unregister - Unregister the rate control callbacks + * + * This should be called after calling ieee80211_unregister_hw, but before + * the driver is unloaded. + */ +extern void iwl4965_rate_control_unregister(void); +extern void iwl3945_rate_control_unregister(void); + +#endif /* __iwl_legacy_rs__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c new file mode 100644 index 000000000000..903ef0d6d6cb --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-power.c @@ -0,0 +1,165 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ + + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +#include <net/mac80211.h> + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-commands.h" +#include "iwl-debug.h" +#include "iwl-power.h" + +/* + * Setting power level allows the card to go to sleep when not busy. + * + * We calculate a sleep command based on the required latency, which + * we get from mac80211. In order to handle thermal throttling, we can + * also use pre-defined power levels. + */ + +/* + * This defines the old power levels. 
They are still used by default + * (level 1) and for thermal throttle (levels 3 through 5) + */ + +struct iwl_power_vec_entry { + struct iwl_powertable_cmd cmd; + u8 no_dtim; /* number of skip dtim */ +}; + +static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv, + struct iwl_powertable_cmd *cmd) +{ + memset(cmd, 0, sizeof(*cmd)); + + if (priv->power_data.pci_pm) + cmd->flags |= IWL_POWER_PCI_PM_MSK; + + IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); +} + +static int +iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) +{ + IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); + IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags); + IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", + le32_to_cpu(cmd->tx_data_timeout)); + IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", + le32_to_cpu(cmd->rx_data_timeout)); + IWL_DEBUG_POWER(priv, + "Sleep interval vector = { %d , %d , %d , %d , %d }\n", + le32_to_cpu(cmd->sleep_interval[0]), + le32_to_cpu(cmd->sleep_interval[1]), + le32_to_cpu(cmd->sleep_interval[2]), + le32_to_cpu(cmd->sleep_interval[3]), + le32_to_cpu(cmd->sleep_interval[4])); + + return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD, + sizeof(struct iwl_powertable_cmd), cmd); +} + +int +iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, + bool force) +{ + int ret; + bool update_chains; + + lockdep_assert_held(&priv->mutex); + + /* Don't update the RX chain when chain noise calibration is running */ + update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || + priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; + + if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) + return 0; + + if (!iwl_legacy_is_ready_rf(priv)) + return -EIO; + + /* scan complete use sleep_power_next, need to be updated */ + memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); + if (test_bit(STATUS_SCANNING, &priv->status) && !force) { + IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n"); + return 0; + } + + if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) + set_bit(STATUS_POWER_PMI, &priv->status); + + ret = iwl_legacy_set_power(priv, cmd); + if (!ret) { + if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) + clear_bit(STATUS_POWER_PMI, &priv->status); + + if (priv->cfg->ops->lib->update_chain_flags && update_chains) + priv->cfg->ops->lib->update_chain_flags(priv); + else if (priv->cfg->ops->lib->update_chain_flags) + IWL_DEBUG_POWER(priv, + "Cannot update the power, chain noise " + "calibration running: %d\n", + priv->chain_noise_data.state); + + memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)); + } else + IWL_ERR(priv, "set power fail, ret = %d", ret); + + return ret; +} + +int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force) +{ + struct iwl_powertable_cmd cmd; + + iwl_legacy_power_sleep_cam_cmd(priv, &cmd); + return iwl_legacy_power_set_mode(priv, &cmd, force); +} +EXPORT_SYMBOL(iwl_legacy_power_update_mode); + +/* initialize to default */ +void iwl_legacy_power_initialize(struct iwl_priv *priv) +{ + u16 lctl = iwl_legacy_pcie_link_ctl(priv); + + priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN); + + priv->power_data.debug_sleep_level_override = -1; + + memset(&priv->power_data.sleep_cmd, 0, + sizeof(priv->power_data.sleep_cmd)); +} +EXPORT_SYMBOL(iwl_legacy_power_initialize); diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h new file mode 100644 index 000000000000..d30b36acdc4a --- /dev/null +++ 
b/drivers/net/wireless/iwlegacy/iwl-power.h @@ -0,0 +1,55 @@ +/****************************************************************************** + * + * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#ifndef __iwl_legacy_power_setting_h__ +#define __iwl_legacy_power_setting_h__ + +#include "iwl-commands.h" + +enum iwl_power_level { + IWL_POWER_INDEX_1, + IWL_POWER_INDEX_2, + IWL_POWER_INDEX_3, + IWL_POWER_INDEX_4, + IWL_POWER_INDEX_5, + IWL_POWER_NUM +}; + +struct iwl_power_mgr { + struct iwl_powertable_cmd sleep_cmd; + struct iwl_powertable_cmd sleep_cmd_next; + int debug_sleep_level_override; + bool pci_pm; +}; + +int +iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, + bool force); +int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force); +void iwl_legacy_power_initialize(struct iwl_priv *priv); + +#endif /* __iwl_legacy_power_setting_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/iwl-prph.h new file mode 100644 index 000000000000..30a493003ab0 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-prph.h @@ -0,0 +1,523 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_legacy_prph_h__ +#define __iwl_legacy_prph_h__ + +/* + * Registers in this file are internal, not PCI bus memory mapped. + * Driver accesses these via HBUS_TARG_PRPH_* registers. + */ +#define PRPH_BASE (0x00000) +#define PRPH_END (0xFFFFF) + +/* APMG (power management) constants */ +#define APMG_BASE (PRPH_BASE + 0x3000) +#define APMG_CLK_CTRL_REG (APMG_BASE + 0x0000) +#define APMG_CLK_EN_REG (APMG_BASE + 0x0004) +#define APMG_CLK_DIS_REG (APMG_BASE + 0x0008) +#define APMG_PS_CTRL_REG (APMG_BASE + 0x000c) +#define APMG_PCIDEV_STT_REG (APMG_BASE + 0x0010) +#define APMG_RFKILL_REG (APMG_BASE + 0x0014) +#define APMG_RTC_INT_STT_REG (APMG_BASE + 0x001c) +#define APMG_RTC_INT_MSK_REG (APMG_BASE + 0x0020) +#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058) +#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C) + +#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001) +#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) +#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) + +#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000) +#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) +#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000) +#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */ +#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000) +#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */ +#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060) + +#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800) + +/** + * BSM (Bootstrap State Machine) + * + * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program + * in special SRAM that does not power down when the embedded control + * processor is sleeping (e.g. for periodic power-saving shutdowns of radio). 
+ * + * When powering back up after sleeps (or during initial uCode load), the BSM + * internally loads the short bootstrap program from the special SRAM into the + * embedded processor's instruction SRAM, and starts the processor so it runs + * the bootstrap program. + * + * This bootstrap program loads (via PCI busmaster DMA) instructions and data + * images for a uCode program from host DRAM locations. The host driver + * indicates DRAM locations and sizes for instruction and data images via the + * four BSM_DRAM_* registers. Once the bootstrap program loads the new program, + * the new program starts automatically. + * + * The uCode used for open-source drivers includes two programs: + * + * 1) Initialization -- performs hardware calibration and sets up some + * internal data, then notifies host via "initialize alive" notification + * (struct iwl_init_alive_resp) that it has completed all of its work. + * After signal from host, it then loads and starts the runtime program. + * The initialization program must be used when initially setting up the + * NIC after loading the driver. + * + * 2) Runtime/Protocol -- performs all normal runtime operations. This + * notifies host via "alive" notification (struct iwl_alive_resp) that it + * is ready to be used. + * + * When initializing the NIC, the host driver does the following procedure: + * + * 1) Load bootstrap program (instructions only, no data image for bootstrap) + * into bootstrap memory. Use dword writes starting at BSM_SRAM_LOWER_BOUND + * + * 2) Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction + * images in host DRAM. + * + * 3) Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked: + * BSM_WR_MEM_SRC_REG = 0 + * BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND + * BSM_WR_MEM_DWCOUNT_REG = # dwords in bootstrap instruction image + * + * 4) Load bootstrap into instruction SRAM: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START + * + * 5) Wait for load completion: + * Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0 + * + * 6) Enable future boot loads whenever NIC's power management triggers it: + * BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN + * + * 7) Start the NIC by removing all reset bits: + * CSR_RESET = 0 + * + * The bootstrap uCode (already in instruction SRAM) loads initialization + * uCode. Initialization uCode performs data initialization, sends + * "initialize alive" notification to host, and waits for a signal from + * host to load runtime code. + * + * 4) Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction + * images in host DRAM. The last register loaded must be the instruction + * byte count register ("1" in MSbit tells initialization uCode to load + * the runtime uCode): + * BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD + * + * 5) Wait for "alive" notification, then issue normal runtime commands. + * + * Data caching during power-downs: + * + * Just before the embedded controller powers down (e.g for automatic + * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA) + * a current snapshot of the embedded processor's data SRAM into host DRAM. + * This caches the data while the embedded processor's memory is powered down. + * Location and size are controlled by BSM_DRAM_DATA_* registers. + * + * NOTE: Instruction SRAM does not need to be saved, since that doesn't + * change during operation; the original image (from uCode distribution + * file) can be used for reload. 
+ * + * When powering back up, the BSM loads the bootstrap program. Bootstrap looks + * at the BSM_DRAM_* registers, which now point to the runtime instruction + * image and the cached (modified) runtime data (*not* the initialization + * uCode). Bootstrap reloads these runtime images into SRAM, and restarts the + * uCode from where it left off before the power-down. + * + * NOTE: Initialization uCode does *not* run as part of the save/restore + * procedure. + * + * This save/restore method is mostly for autonomous power management during + * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and + * RFKILL should use complete restarts (with total re-initialization) of uCode, + * allowing total shutdown (including BSM memory). + * + * Note that, during normal operation, the host DRAM that held the initial + * startup data for the runtime code is now being used as a backup data cache + * for modified data! If you need to completely re-initialize the NIC, make + * sure that you use the runtime data image from the uCode distribution file, + * not the modified/saved runtime data. You may want to store a separate + * "clean" runtime data image in DRAM to avoid disk reads of distribution file. + */ + +/* BSM bit fields */ +#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */ +#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/ +#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */ + +/* BSM addresses */ +#define BSM_BASE (PRPH_BASE + 0x3400) +#define BSM_END (PRPH_BASE + 0x3800) + +#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */ +#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */ +#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */ +#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */ +#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */ + +/* + * Pointers and size regs for bootstrap load and data SRAM save/restore. + * NOTE: 3945 pointers use bits 31:0 of DRAM address. + * 4965 pointers use bits 35:4 of DRAM address. + */ +#define BSM_DRAM_INST_PTR_REG (BSM_BASE + 0x090) +#define BSM_DRAM_INST_BYTECOUNT_REG (BSM_BASE + 0x094) +#define BSM_DRAM_DATA_PTR_REG (BSM_BASE + 0x098) +#define BSM_DRAM_DATA_BYTECOUNT_REG (BSM_BASE + 0x09C) + +/* + * BSM special memory, stays powered on during power-save sleeps. + * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1) + */ +#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800) +#define BSM_SRAM_SIZE (1024) /* bytes */ + + +/* 3945 Tx scheduler registers */ +#define ALM_SCD_BASE (PRPH_BASE + 0x2E00) +#define ALM_SCD_MODE_REG (ALM_SCD_BASE + 0x000) +#define ALM_SCD_ARASTAT_REG (ALM_SCD_BASE + 0x004) +#define ALM_SCD_TXFACT_REG (ALM_SCD_BASE + 0x010) +#define ALM_SCD_TXF4MF_REG (ALM_SCD_BASE + 0x014) +#define ALM_SCD_TXF5MF_REG (ALM_SCD_BASE + 0x020) +#define ALM_SCD_SBYP_MODE_1_REG (ALM_SCD_BASE + 0x02C) +#define ALM_SCD_SBYP_MODE_2_REG (ALM_SCD_BASE + 0x030) + +/** + * Tx Scheduler + * + * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs + * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in + * host DRAM. It steers each frame's Tx command (which contains the frame + * data) into one of up to 7 prioritized Tx DMA FIFO channels within the + * device. A queue maps to only one (selectable by driver) Tx DMA channel, + * but one DMA channel may take input from several queues. + * + * Tx DMA FIFOs have dedicated purposes. 
For 4965, they are used as follows + * (cf. default_queue_to_tx_fifo in iwl-4965.c): + * + * 0 -- EDCA BK (background) frames, lowest priority + * 1 -- EDCA BE (best effort) frames, normal priority + * 2 -- EDCA VI (video) frames, higher priority + * 3 -- EDCA VO (voice) and management frames, highest priority + * 4 -- Commands (e.g. RXON, etc.) + * 5 -- unused (HCCA) + * 6 -- unused (HCCA) + * 7 -- not used by driver (device-internal only) + * + * + * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. + * In addition, driver can map the remaining queues to Tx DMA/FIFO + * channels 0-3 to support 11n aggregation via EDCA DMA channels. + * + * The driver sets up each queue to work in one of two modes: + * + * 1) Scheduler-Ack, in which the scheduler automatically supports a + * block-ack (BA) window of up to 64 TFDs. In this mode, each queue + * contains TFDs for a unique combination of Recipient Address (RA) + * and Traffic Identifier (TID), that is, traffic of a given + * Quality-Of-Service (QOS) priority, destined for a single station. + * + * In scheduler-ack mode, the scheduler keeps track of the Tx status of + * each frame within the BA window, including whether it's been transmitted, + * and whether it's been acknowledged by the receiving station. The device + * automatically processes block-acks received from the receiving STA, + * and reschedules un-acked frames to be retransmitted (successful + * Tx completion may end up being out-of-order). + * + * The driver must maintain the queue's Byte Count table in host DRAM + * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode. + * This mode does not support fragmentation. + * + * 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order. + * The device may automatically retry Tx, but will retry only one frame + * at a time, until receiving ACK from receiving station, or reaching + * retry limit and giving up. + * + * The command queue (#4/#9) must use this mode! + * This mode does not require use of the Byte Count table in host DRAM. + * + * Driver controls scheduler operation via 3 means: + * 1) Scheduler registers + * 2) Shared scheduler data base in internal 4956 SRAM + * 3) Shared data in host DRAM + * + * Initialization: + * + * When loading, driver should allocate memory for: + * 1) 16 TFD circular buffers, each with space for (typically) 256 TFDs. + * 2) 16 Byte Count circular buffers in 16 KBytes contiguous memory + * (1024 bytes for each queue). + * + * After receiving "Alive" response from uCode, driver must initialize + * the scheduler (especially for queue #4/#9, the command queue, otherwise + * the driver can't issue commands!): + */ + +/** + * Max Tx window size is the max number of contiguous TFDs that the scheduler + * can keep track of at one time when creating block-ack chains of frames. + * Note that "64" matches the number of ack bits in a block-ack packet. + * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize + * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values. + */ +#define SCD_WIN_SIZE 64 +#define SCD_FRAME_LIMIT 64 + +/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */ +#define IWL49_SCD_START_OFFSET 0xa02c00 + +/* + * 4965 tells driver SRAM address for internal scheduler structs via this reg. + * Value is valid only after "Alive" response from uCode. 
+ */ +#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0) + +/* + * Driver may need to update queue-empty bits after changing queue's + * write and read pointers (indexes) during (re-)initialization (i.e. when + * scheduler is not tracking what's happening). + * Bit fields: + * 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit + * 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty + * NOTE: This register is not used by Linux driver. + */ +#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4) + +/* + * Physical base address of array of byte count (BC) circular buffers (CBs). + * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode. + * This register points to BC CB for queue 0, must be on 1024-byte boundary. + * Others are spaced by 1024 bytes. + * Each BC CB is 2 bytes * (256 + 64) = 740 bytes, followed by 384 bytes pad. + * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff). + * Bit fields: + * 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned. + */ +#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10) + +/* + * Enables any/all Tx DMA/FIFO channels. + * Scheduler generates requests for only the active channels. + * Set this to 0xff to enable all 8 channels (normal usage). + * Bit fields: + * 7- 0: Enable (1), disable (0), one bit for each channel 0-7 + */ +#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c) +/* + * Queue (x) Write Pointers (indexes, really!), one for each Tx queue. + * Initialized and updated by driver as new TFDs are added to queue. + * NOTE: If using Block Ack, index must correspond to frame's + * Start Sequence Number; index = (SSN & 0xff) + * NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses? + */ +#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4) + +/* + * Queue (x) Read Pointers (indexes, really!), one for each Tx queue. + * For FIFO mode, index indicates next frame to transmit. + * For Scheduler-ACK mode, index indicates first frame in Tx window. + * Initialized by driver, updated by scheduler. + */ +#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4) + +/* + * Select which queues work in chain mode (1) vs. not (0). + * Use chain mode to build chains of aggregated frames. + * Bit fields: + * 31-16: Reserved + * 15-00: Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time + * NOTE: If driver sets up queue for chain mode, it should be also set up + * Scheduler-ACK mode as well, via SCD_QUEUE_STATUS_BITS(x). + */ +#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0) + +/* + * Select which queues interrupt driver when scheduler increments + * a queue's read pointer (index). + * Bit fields: + * 31-16: Reserved + * 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled + * NOTE: This functionality is apparently a no-op; driver relies on interrupts + * from Rx queue to read Tx command responses and update Tx queues. + */ +#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4) + +/* + * Queue search status registers. One for each queue. + * Sets up queue mode and assigns queue to Tx DMA channel. + * Bit fields: + * 19-10: Write mask/enable bits for bits 0-9 + * 9: Driver should init to "0" + * 8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0). + * Driver should init to "1" for aggregation mode, or "0" otherwise. 
+ * 7-6: Driver should init to "0" + * 5: Window Size Left; indicates whether scheduler can request + * another TFD, based on window size, etc. Driver should init + * this bit to "1" for aggregation mode, or "0" for non-agg. + * 4-1: Tx FIFO to use (range 0-7). + * 0: Queue is active (1), not active (0). + * Other bits should be written as "0" + * + * NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled + * via SCD_QUEUECHAIN_SEL. + */ +#define IWL49_SCD_QUEUE_STATUS_BITS(x)\ + (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4) + +/* Bit field positions */ +#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0) +#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1) +#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5) +#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8) + +/* Write masks */ +#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10) +#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00) + +/** + * 4965 internal SRAM structures for scheduler, shared with driver ... + * + * Driver should clear and initialize the following areas after receiving + * "Alive" response from 4965 uCode, i.e. after initial + * uCode load, or after a uCode load done for error recovery: + * + * SCD_CONTEXT_DATA_OFFSET (size 128 bytes) + * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes) + * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes) + * + * Driver accesses SRAM via HBUS_TARG_MEM_* registers. + * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR. + * All OFFSET values must be added to this base address. + */ + +/* + * Queue context. One 8-byte entry for each of 16 queues. + * + * Driver should clear this entire area (size 0x80) to 0 after receiving + * "Alive" notification from uCode. Additionally, driver should init + * each queue's entry as follows: + * + * LS Dword bit fields: + * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64. + * + * MS Dword bit fields: + * 16-22: Frame limit. Driver should init to 10 (0xa). + * + * Driver should init all other bits to 0. + * + * Init must be done after driver receives "Alive" response from 4965 uCode, + * and when setting up queue for aggregation. + */ +#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380 +#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \ + (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) + +#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0) +#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F) +#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) +#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) + +/* + * Tx Status Bitmap + * + * Driver should clear this entire area (size 0x100) to 0 after receiving + * "Alive" notification from uCode. Area is used only by device itself; + * no other support (besides clearing) is required from driver. + */ +#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400 + +/* + * RAxTID to queue translation mapping. + * + * When queue is in Scheduler-ACK mode, frames placed in a that queue must be + * for only one combination of receiver address (RA) and traffic ID (TID), i.e. + * one QOS priority level destined for one station (for this wireless link, + * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit + * mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK + * mode, the device ignores the mapping value. 
+ * + * Bit fields, for each 16-bit map: + * 15-9: Reserved, set to 0 + * 8-4: Index into device's station table for recipient station + * 3-0: Traffic ID (tid), range 0-15 + * + * Driver should clear this entire area (size 32 bytes) to 0 after receiving + * "Alive" notification from uCode. To update a 16-bit map value, driver + * must read a dword-aligned value from device SRAM, replace the 16-bit map + * value of interest, and write the dword value back into device SRAM. + */ +#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500 + +/* Find translation table dword to read/write for given queue */ +#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ + ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc) + +#define IWL_SCD_TXFIFO_POS_TID (0) +#define IWL_SCD_TXFIFO_POS_RA (4) +#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) + +/*********************** END TX SCHEDULER *************************************/ + +#endif /* __iwl_legacy_prph_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c new file mode 100644 index 000000000000..654cf233a384 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-rx.c @@ -0,0 +1,302 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/etherdevice.h> +#include <linux/slab.h> +#include <net/mac80211.h> +#include <asm/unaligned.h> +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +/************************** RX-FUNCTIONS ****************************/ +/* + * Rx theory of operation + * + * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), + * each of which point to Receive Buffers to be filled by the NIC. These get + * used not only for Rx frames, but for any command response or notification + * from the NIC. The driver and NIC manage the Rx buffers by means + * of indexes into the circular buffer. + * + * Rx Queue Indexes + * The host/firmware share two index registers for managing the Rx buffers. + * + * The READ index maps to the first position that the firmware may be writing + * to -- the driver can read up to (but not including) this position and get + * good data. + * The READ index is managed by the firmware once the card is enabled. 
+ * + * The WRITE index maps to the last position the driver has read from -- the + * position preceding WRITE is the last slot the firmware can place a packet. + * + * The queue is empty (no good data) if WRITE = READ - 1, and is full if + * WRITE = READ. + * + * During initialization, the host sets up the READ queue position to the first + * INDEX position, and WRITE to the last (READ - 1 wrapped) + * + * When the firmware places a packet in a buffer, it will advance the READ index + * and fire the RX interrupt. The driver can then query the READ index and + * process as many packets as possible, moving the WRITE index forward as it + * resets the Rx queue buffers with new memory. + * + * The management in the driver is as follows: + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. + * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) + * + A received packet is processed and handed to the kernel network stack, + * detached from the iwl->rxq. The driver 'processed' index is updated. + * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free + * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ + * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there + * were enough free buffers and RX_STALLED is set it is cleared. + * + * + * Driver sequence: + * + * iwl_legacy_rx_queue_alloc() Allocates rx_free + * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls + * iwl_rx_queue_restock + * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx + * queue, updates firmware pointers, and updates + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_rx_replenish + * + * -- enable interrupts -- + * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the + * READ INDEX, detaching the SKB from the pool. + * Moves the packet buffer from queue to rx_used. + * Calls iwl_rx_queue_restock to refill any empty + * slots. + * ... + * + */ + +/** + * iwl_legacy_rx_queue_space - Return number of free slots available in queue. 
+ */ +int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q) +{ + int s = q->read - q->write; + if (s <= 0) + s += RX_QUEUE_SIZE; + /* keep some buffer to not confuse full and empty queue */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_space); + +/** + * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue + */ +void +iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv, + struct iwl_rx_queue *q) +{ + unsigned long flags; + u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg; + u32 reg; + + spin_lock_irqsave(&q->lock, flags); + + if (q->need_update == 0) + goto exit_unlock; + + /* If power-saving is in use, make sure device is awake */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, + "Rx queue requesting wakeup," + " GP1 = 0x%x\n", reg); + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + goto exit_unlock; + } + + q->write_actual = (q->write & ~0x7); + iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, + q->write_actual); + + /* Else device is assumed to be awake */ + } else { + /* Device expects a multiple of 8 */ + q->write_actual = (q->write & ~0x7); + iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg, + q->write_actual); + } + + q->need_update = 0; + + exit_unlock: + spin_unlock_irqrestore(&q->lock, flags); +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr); + +int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv) +{ + struct iwl_rx_queue *rxq = &priv->rxq; + struct device *dev = &priv->pci_dev->dev; + int i; + + spin_lock_init(&rxq->lock); + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + + /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ + rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, + GFP_KERNEL); + if (!rxq->bd) + goto err_bd; + + rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status), + &rxq->rb_stts_dma, GFP_KERNEL); + if (!rxq->rb_stts) + goto err_rb; + + /* Fill the rx_used queue with _all_ of the Rx buffers */ + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) + list_add_tail(&rxq->pool[i].list, &rxq->rx_used); + + /* Set us so that we have processed and used all buffers, but have + * not restocked the Rx queue with fresh buffers */ + rxq->read = rxq->write = 0; + rxq->write_actual = 0; + rxq->free_count = 0; + rxq->need_update = 0; + return 0; + +err_rb: + dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, + rxq->bd_dma); +err_bd: + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc); + + +void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); + + if (!report->state) { + IWL_DEBUG_11H(priv, + "Spectrum Measure Notification: Start\n"); + return; + } + + memcpy(&priv->measure_report, report, sizeof(*report)); + priv->measurement_status |= MEASUREMENT_READY; +} +EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif); + +void iwl_legacy_recover_from_statistics(struct iwl_priv *priv, + struct iwl_rx_packet *pkt) +{ + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + if (iwl_legacy_is_any_associated(priv)) { + if (priv->cfg->ops->lib->check_plcp_health) { + if (!priv->cfg->ops->lib->check_plcp_health( + priv, pkt)) { + /* + * high plcp error detected + * reset Radio + */ + 
iwl_legacy_force_reset(priv, + IWL_RF_RESET, false); + } + } + } +} +EXPORT_SYMBOL(iwl_legacy_recover_from_statistics); + +/* + * returns non-zero if packet should be dropped + */ +int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats) +{ + u16 fc = le16_to_cpu(hdr->frame_control); + + /* + * All contexts have the same setting here due to it being + * a module parameter, so OK to check any context. + */ + if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags & + RXON_FILTER_DIS_DECRYPT_MSK) + return 0; + + if (!(fc & IEEE80211_FCTL_PROTECTED)) + return 0; + + IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res); + switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { + case RX_RES_STATUS_SEC_TYPE_TKIP: + /* The uCode has got a bad phase 1 Key, pushes the packet. + * Decryption will be done in SW. */ + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_KEY_TTAK) + break; + + case RX_RES_STATUS_SEC_TYPE_WEP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_BAD_ICV_MIC) { + /* bad ICV, the packet is destroyed since the + * decryption is inplace, drop it */ + IWL_DEBUG_RX(priv, "Packet destroyed\n"); + return -1; + } + case RX_RES_STATUS_SEC_TYPE_CCMP: + if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == + RX_RES_STATUS_DECRYPT_OK) { + IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n"); + stats->flag |= RX_FLAG_DECRYPTED; + } + break; + + default: + break; + } + return 0; +} +EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag); diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c new file mode 100644 index 000000000000..60f597f796ca --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-scan.c @@ -0,0 +1,625 @@ +/****************************************************************************** + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called LICENSE.GPL. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + *****************************************************************************/ +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/etherdevice.h> +#include <net/mac80211.h> + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" + +/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after + * sending probe req. This should be set long enough to hear probe responses + * from more than one AP. 
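As a worked example of the dwell-time constants defined just below and of the formula in iwl_legacy_get_active_dwell_time() later in this file: with two probe requests queued on 2.4 GHz, the active dwell works out to 30 + 3 * (2 + 1) = 39 ms. A minimal sketch of that calculation (the n_probes value is an arbitrary example):

#include <stdio.h>

/* Constants as defined just below in iwl-scan.c. */
#define IWL_ACTIVE_DWELL_TIME_24	30	/* msec */
#define IWL_ACTIVE_DWELL_FACTOR_24GHZ	3

/* Same formula as the 2.4 GHz branch of iwl_legacy_get_active_dwell_time(). */
static unsigned int active_dwell_24(unsigned int n_probes)
{
	return IWL_ACTIVE_DWELL_TIME_24 +
	       IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}

int main(void)
{
	printf("%u ms\n", active_dwell_24(2));	/* 39 ms */
	return 0;
}
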
*/ +#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */ +#define IWL_ACTIVE_DWELL_TIME_52 (20) + +#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3) +#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2) + +/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. + * Must be set longer than active dwell time. + * For the most reliable scan, set > AP beacon interval (typically 100msec). */ +#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ +#define IWL_PASSIVE_DWELL_TIME_52 (10) +#define IWL_PASSIVE_DWELL_BASE (100) +#define IWL_CHANNEL_TUNE_TIME 5 + +static int iwl_legacy_send_scan_abort(struct iwl_priv *priv) +{ + int ret; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = REPLY_SCAN_ABORT_CMD, + .flags = CMD_WANT_SKB, + }; + + /* Exit instantly with error when device is not ready + * to receive scan abort command or it does not perform + * hardware scan currently */ + if (!test_bit(STATUS_READY, &priv->status) || + !test_bit(STATUS_GEO_CONFIGURED, &priv->status) || + !test_bit(STATUS_SCAN_HW, &priv->status) || + test_bit(STATUS_FW_ERROR, &priv->status) || + test_bit(STATUS_EXIT_PENDING, &priv->status)) + return -EIO; + + ret = iwl_legacy_send_cmd_sync(priv, &cmd); + if (ret) + return ret; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->u.status != CAN_ABORT_STATUS) { + /* The scan abort will return 1 for success or + * 2 for "failure". A failure condition can be + * due to simply not being in an active scan which + * can occur if we send the scan abort before we + * the microcode has notified us that a scan is + * completed. */ + IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status); + ret = -EIO; + } + + iwl_legacy_free_pages(priv, cmd.reply_page); + return ret; +} + +static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted) +{ + /* check if scan was requested from mac80211 */ + if (priv->scan_request) { + IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n"); + ieee80211_scan_completed(priv->hw, aborted); + } + + priv->is_internal_short_scan = false; + priv->scan_vif = NULL; + priv->scan_request = NULL; +} + +void iwl_legacy_force_scan_end(struct iwl_priv *priv) +{ + lockdep_assert_held(&priv->mutex); + + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n"); + return; + } + + IWL_DEBUG_SCAN(priv, "Forcing scan end\n"); + clear_bit(STATUS_SCANNING, &priv->status); + clear_bit(STATUS_SCAN_HW, &priv->status); + clear_bit(STATUS_SCAN_ABORTING, &priv->status); + iwl_legacy_complete_scan(priv, true); +} + +static void iwl_legacy_do_scan_abort(struct iwl_priv *priv) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (!test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n"); + return; + } + + if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan abort in progress\n"); + return; + } + + ret = iwl_legacy_send_scan_abort(priv); + if (ret) { + IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret); + iwl_legacy_force_scan_end(priv); + } else + IWL_DEBUG_SCAN(priv, "Sucessfully send scan abort\n"); +} + +/** + * iwl_scan_cancel - Cancel any currently executing HW scan + */ +int iwl_legacy_scan_cancel(struct iwl_priv *priv) +{ + IWL_DEBUG_SCAN(priv, "Queuing abort scan\n"); + queue_work(priv->workqueue, &priv->abort_scan); + return 0; +} +EXPORT_SYMBOL(iwl_legacy_scan_cancel); + +/** + * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan + * @ms: amount of time to 
wait (in milliseconds) for scan to abort + * + */ +int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(ms); + + lockdep_assert_held(&priv->mutex); + + IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n"); + + iwl_legacy_do_scan_abort(priv); + + while (time_before_eq(jiffies, timeout)) { + if (!test_bit(STATUS_SCAN_HW, &priv->status)) + break; + msleep(20); + } + + return test_bit(STATUS_SCAN_HW, &priv->status); +} +EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout); + +/* Service response to REPLY_SCAN_CMD (0x80) */ +static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanreq_notification *notif = + (struct iwl_scanreq_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status); +#endif +} + +/* Service SCAN_START_NOTIFICATION (0x82) */ +static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanstart_notification *notif = + (struct iwl_scanstart_notification *)pkt->u.raw; + priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); + IWL_DEBUG_SCAN(priv, "Scan start: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + notif->status, notif->beacon_timer); +} + +/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ +static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scanresults_notification *notif = + (struct iwl_scanresults_notification *)pkt->u.raw; + + IWL_DEBUG_SCAN(priv, "Scan ch.res: " + "%d [802.11%s] " + "(TSF: 0x%08X:%08X) - %d " + "elapsed=%lu usec\n", + notif->channel, + notif->band ? "bg" : "a", + le32_to_cpu(notif->tsf_high), + le32_to_cpu(notif->tsf_low), + le32_to_cpu(notif->statistics[0]), + le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); +#endif +} + +/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ +static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; +#endif + + IWL_DEBUG_SCAN(priv, + "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", + scan_notif->scanned_channels, + scan_notif->tsf_low, + scan_notif->tsf_high, scan_notif->status); + + /* The HW is no longer scanning */ + clear_bit(STATUS_SCAN_HW, &priv->status); + + IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", + (priv->scan_band == IEEE80211_BAND_2GHZ) ? 
"2.4" : "5.2", + jiffies_to_msecs(jiffies - priv->scan_start)); + + queue_work(priv->workqueue, &priv->scan_completed); +} + +void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv) +{ + /* scan handlers */ + priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan; + priv->rx_handlers[SCAN_START_NOTIFICATION] = + iwl_legacy_rx_scan_start_notif; + priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = + iwl_legacy_rx_scan_results_notif; + priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = + iwl_legacy_rx_scan_complete_notif; +} +EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers); + +inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + u8 n_probes) +{ + if (band == IEEE80211_BAND_5GHZ) + return IWL_ACTIVE_DWELL_TIME_52 + + IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1); + else + return IWL_ACTIVE_DWELL_TIME_24 + + IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); +} +EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time); + +u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv, + enum ieee80211_band band, + struct ieee80211_vif *vif) +{ + struct iwl_rxon_context *ctx; + u16 passive = (band == IEEE80211_BAND_2GHZ) ? + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : + IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; + + if (iwl_legacy_is_any_associated(priv)) { + /* + * If we're associated, we clamp the maximum passive + * dwell time to be 98% of the smallest beacon interval + * (minus 2 * channel tune time) + */ + for_each_context(priv, ctx) { + u16 value; + + if (!iwl_legacy_is_associated_ctx(ctx)) + continue; + value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0; + if ((value > IWL_PASSIVE_DWELL_BASE) || !value) + value = IWL_PASSIVE_DWELL_BASE; + value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; + passive = min(value, passive); + } + } + + return passive; +} +EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time); + +void iwl_legacy_init_scan_params(struct iwl_priv *priv) +{ + u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; + if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; + if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) + priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; +} +EXPORT_SYMBOL(iwl_legacy_init_scan_params); + +static int __must_check iwl_legacy_scan_initiate(struct iwl_priv *priv, + struct ieee80211_vif *vif, + bool internal, + enum ieee80211_band band) +{ + int ret; + + lockdep_assert_held(&priv->mutex); + + if (WARN_ON(!priv->cfg->ops->utils->request_scan)) + return -EOPNOTSUPP; + + cancel_delayed_work(&priv->scan_check); + + if (!iwl_legacy_is_ready_rf(priv)) { + IWL_WARN(priv, "Request scan called when driver not ready.\n"); + return -EIO; + } + + if (test_bit(STATUS_SCAN_HW, &priv->status)) { + IWL_DEBUG_SCAN(priv, + "Multiple concurrent scan requests in parallel.\n"); + return -EBUSY; + } + + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n"); + return -EBUSY; + } + + IWL_DEBUG_SCAN(priv, "Starting %sscan...\n", + internal ? 
"internal short " : ""); + + set_bit(STATUS_SCANNING, &priv->status); + priv->is_internal_short_scan = internal; + priv->scan_start = jiffies; + priv->scan_band = band; + + ret = priv->cfg->ops->utils->request_scan(priv, vif); + if (ret) { + clear_bit(STATUS_SCANNING, &priv->status); + priv->is_internal_short_scan = false; + return ret; + } + + queue_delayed_work(priv->workqueue, &priv->scan_check, + IWL_SCAN_CHECK_WATCHDOG); + + return 0; +} + +int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_scan_request *req) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (req->n_channels == 0) + return -EINVAL; + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_SCANNING, &priv->status) && + !priv->is_internal_short_scan) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); + ret = -EAGAIN; + goto out_unlock; + } + + /* mac80211 will only ask for one band at a time */ + priv->scan_request = req; + priv->scan_vif = vif; + + /* + * If an internal scan is in progress, just set + * up the scan_request as per above. + */ + if (priv->is_internal_short_scan) { + IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); + ret = 0; + } else + ret = iwl_legacy_scan_initiate(priv, vif, false, + req->channels[0]->band); + + IWL_DEBUG_MAC80211(priv, "leave\n"); + +out_unlock: + mutex_unlock(&priv->mutex); + + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_hw_scan); + +/* + * internal short scan, this function should only been called while associated. + * It will reset and tune the radio to prevent possible RF related problem + */ +void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv) +{ + queue_work(priv->workqueue, &priv->start_internal_scan); +} + +static void iwl_legacy_bg_start_internal_scan(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, start_internal_scan); + + IWL_DEBUG_SCAN(priv, "Start internal scan\n"); + + mutex_lock(&priv->mutex); + + if (priv->is_internal_short_scan == true) { + IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); + goto unlock; + } + + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); + goto unlock; + } + + if (iwl_legacy_scan_initiate(priv, NULL, true, priv->band)) + IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n"); + unlock: + mutex_unlock(&priv->mutex); +} + +static void iwl_legacy_bg_scan_check(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, scan_check.work); + + IWL_DEBUG_SCAN(priv, "Scan check work\n"); + + /* Since we are here firmware does not finish scan and + * most likely is in bad shape, so we don't bother to + * send abort command, just force scan complete to mac80211 */ + mutex_lock(&priv->mutex); + iwl_legacy_force_scan_end(priv); + mutex_unlock(&priv->mutex); +} + +/** + * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request + */ + +u16 +iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, + const u8 *ta, const u8 *ies, int ie_len, int left) +{ + int len = 0; + u8 *pos = NULL; + + /* Make sure there is enough space for the probe request, + * two mandatory IEs and the data */ + left -= 24; + if (left < 0) + return 0; + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN); + memcpy(frame->sa, ta, ETH_ALEN); + memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN); + frame->seq_ctrl = 
0; + + len += 24; + + /* ...next IE... */ + pos = &frame->u.probe_req.variable[0]; + + /* fill in our indirect SSID IE */ + left -= 2; + if (left < 0) + return 0; + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + len += 2; + + if (WARN_ON(left < ie_len)) + return len; + + if (ies && ie_len) { + memcpy(pos, ies, ie_len); + len += ie_len; + } + + return (u16)len; +} +EXPORT_SYMBOL(iwl_legacy_fill_probe_req); + +static void iwl_legacy_bg_abort_scan(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); + + IWL_DEBUG_SCAN(priv, "Abort scan work\n"); + + /* We keep scan_check work queued in case when firmware will not + * report back scan completed notification */ + mutex_lock(&priv->mutex); + iwl_legacy_scan_cancel_timeout(priv, 200); + mutex_unlock(&priv->mutex); +} + +static void iwl_legacy_bg_scan_completed(struct work_struct *work) +{ + struct iwl_priv *priv = + container_of(work, struct iwl_priv, scan_completed); + bool aborted; + + IWL_DEBUG_SCAN(priv, "Completed %sscan.\n", + priv->is_internal_short_scan ? "internal short " : ""); + + cancel_delayed_work(&priv->scan_check); + + mutex_lock(&priv->mutex); + + aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status); + if (aborted) + IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n"); + + if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan already completed.\n"); + goto out_settings; + } + + if (priv->is_internal_short_scan && !aborted) { + int err; + + /* Check if mac80211 requested scan during our internal scan */ + if (priv->scan_request == NULL) + goto out_complete; + + /* If so request a new scan */ + err = iwl_legacy_scan_initiate(priv, priv->scan_vif, false, + priv->scan_request->channels[0]->band); + if (err) { + IWL_DEBUG_SCAN(priv, + "failed to initiate pending scan: %d\n", err); + aborted = true; + goto out_complete; + } + + goto out; + } + +out_complete: + iwl_legacy_complete_scan(priv, aborted); + +out_settings: + /* Can we still talk to firmware ? */ + if (!iwl_legacy_is_ready_rf(priv)) + goto out; + + /* + * We do not commit power settings while scan is pending, + * do it now if the settings changed. 
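Returning to iwl_legacy_fill_probe_req() above: the length bookkeeping budgets 24 bytes for the management header, 2 bytes for the wildcard (zero-length) SSID IE, and only then appends the caller-supplied IEs if they still fit. A simplified, stand-alone restatement of that budget (the WARN_ON is dropped and the buffer sizes in main() are arbitrary examples):

#include <stdio.h>

/* Space accounting as in iwl_legacy_fill_probe_req():
 * 24-byte management header, 2-byte wildcard SSID IE, then caller IEs. */
static int probe_req_len(int ie_len, int left)
{
	int len = 0;

	left -= 24;			/* management header */
	if (left < 0)
		return 0;
	len += 24;

	left -= 2;			/* WLAN_EID_SSID + zero length */
	if (left < 0)
		return 0;
	len += 2;

	if (left < ie_len)		/* caller IEs no longer fit */
		return len;
	return len + ie_len;
}

int main(void)
{
	printf("%d\n", probe_req_len(40, 200));	/* 66: header + SSID IE + IEs */
	printf("%d\n", probe_req_len(40, 30));	/* 26: IEs dropped, header kept */
	return 0;
}
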
+ */ + iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, + false); + iwl_legacy_set_tx_power(priv, priv->tx_power_next, false); + + priv->cfg->ops->utils->post_scan(priv); + +out: + mutex_unlock(&priv->mutex); +} + +void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv) +{ + INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed); + INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan); + INIT_WORK(&priv->start_internal_scan, + iwl_legacy_bg_start_internal_scan); + INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check); +} +EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work); + +void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->start_internal_scan); + cancel_work_sync(&priv->abort_scan); + cancel_work_sync(&priv->scan_completed); + + if (cancel_delayed_work_sync(&priv->scan_check)) { + mutex_lock(&priv->mutex); + iwl_legacy_force_scan_end(priv); + mutex_unlock(&priv->mutex); + } +} +EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work); diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h new file mode 100644 index 000000000000..9f70a4723103 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h @@ -0,0 +1,92 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#ifndef __iwl_legacy_spectrum_h__ +#define __iwl_legacy_spectrum_h__ +enum { /* ieee80211_basic_report.map */ + IEEE80211_BASIC_MAP_BSS = (1 << 0), + IEEE80211_BASIC_MAP_OFDM = (1 << 1), + IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2), + IEEE80211_BASIC_MAP_RADAR = (1 << 3), + IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4), + /* Bits 5-7 are reserved */ + +}; +struct ieee80211_basic_report { + u8 channel; + __le64 start_time; + __le16 duration; + u8 map; +} __packed; + +enum { /* ieee80211_measurement_request.mode */ + /* Bit 0 is reserved */ + IEEE80211_MEASUREMENT_ENABLE = (1 << 1), + IEEE80211_MEASUREMENT_REQUEST = (1 << 2), + IEEE80211_MEASUREMENT_REPORT = (1 << 3), + /* Bits 4-7 are reserved */ +}; + +enum { + IEEE80211_REPORT_BASIC = 0, /* required */ + IEEE80211_REPORT_CCA = 1, /* optional */ + IEEE80211_REPORT_RPI = 2, /* optional */ + /* 3-255 reserved */ +}; + +struct ieee80211_measurement_params { + u8 channel; + __le64 start_time; + __le16 duration; +} __packed; + +struct ieee80211_info_element { + u8 id; + u8 len; + u8 data[0]; +} __packed; + +struct ieee80211_measurement_request { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + struct ieee80211_measurement_params params[0]; +} __packed; + +struct ieee80211_measurement_report { + struct ieee80211_info_element ie; + u8 token; + u8 mode; + u8 type; + union { + struct ieee80211_basic_report basic[0]; + } u; +} __packed; + +#endif diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c new file mode 100644 index 000000000000..47c9da3834ea --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-sta.c @@ -0,0 +1,816 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <linux/sched.h> +#include <linux/lockdep.h> + +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" + +/* priv->sta_lock must be held */ +static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) +{ + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) + IWL_ERR(priv, + "ACTIVATE a non DRIVER active station id %u addr %pM\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + + if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_ASSOC(priv, + "STA id %u addr %pM already present" + " in uCode (according to driver)\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } else { + priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n", + sta_id, priv->stations[sta_id].sta.sta.addr); + } +} + +static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *addsta, + struct iwl_rx_packet *pkt, + bool sync) +{ + u8 sta_id = addsta->sta.sta_id; + unsigned long flags; + int ret = -EIO; + + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", + pkt->hdr.flags); + return ret; + } + + IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", + sta_id); + + spin_lock_irqsave(&priv->sta_lock, flags); + + switch (pkt->u.add_sta.status) { + case ADD_STA_SUCCESS_MSK: + IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); + iwl_legacy_sta_ucode_activate(priv, sta_id); + ret = 0; + break; + case ADD_STA_NO_ROOM_IN_TABLE: + IWL_ERR(priv, "Adding station %d failed, no room in table.\n", + sta_id); + break; + case ADD_STA_NO_BLOCK_ACK_RESOURCE: + IWL_ERR(priv, + "Adding station %d failed, no block ack resource.\n", + sta_id); + break; + case ADD_STA_MODIFY_NON_EXIST_STA: + IWL_ERR(priv, "Attempting to modify non-existing station %d\n", + sta_id); + break; + default: + IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", + pkt->u.add_sta.status); + break; + } + + IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n", + priv->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", + sta_id, priv->stations[sta_id].sta.sta.addr); + + /* + * XXX: The MAC address in the command buffer is often changed from + * the original sent to the device. That is, the MAC address + * written to the command buffer often is not the same MAC adress + * read from the command buffer when the command returns. This + * issue has not yet been resolved and this debugging is left to + * observe the problem. + */ + IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n", + priv->stations[sta_id].sta.mode == + STA_CONTROL_MODIFY_MSK ? 
"Modified" : "Added", + addsta->sta.addr); + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return ret; +} + +static void iwl_legacy_add_sta_callback(struct iwl_priv *priv, + struct iwl_device_cmd *cmd, + struct iwl_rx_packet *pkt) +{ + struct iwl_legacy_addsta_cmd *addsta = + (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload; + + iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false); + +} + +int iwl_legacy_send_add_sta(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *sta, u8 flags) +{ + struct iwl_rx_packet *pkt = NULL; + int ret = 0; + u8 data[sizeof(*sta)]; + struct iwl_host_cmd cmd = { + .id = REPLY_ADD_STA, + .flags = flags, + .data = data, + }; + u8 sta_id __maybe_unused = sta->sta.sta_id; + + IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n", + sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); + + if (flags & CMD_ASYNC) + cmd.callback = iwl_legacy_add_sta_callback; + else { + cmd.flags |= CMD_WANT_SKB; + might_sleep(); + } + + cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); + ret = iwl_legacy_send_cmd(priv, &cmd); + + if (ret || (flags & CMD_ASYNC)) + return ret; + + if (ret == 0) { + pkt = (struct iwl_rx_packet *)cmd.reply_page; + ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true); + } + iwl_legacy_free_pages(priv, cmd.reply_page); + + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_add_sta); + +static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index, + struct ieee80211_sta *sta, + struct iwl_rxon_context *ctx) +{ + struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap; + __le32 sta_flags; + u8 mimo_ps_mode; + + if (!sta || !sta_ht_inf->ht_supported) + goto done; + + mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; + IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n", + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? + "static" : + (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? 
+ "dynamic" : "disabled"); + + sta_flags = priv->stations[index].sta.station_flags; + + sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK); + + switch (mimo_ps_mode) { + case WLAN_HT_CAP_SM_PS_STATIC: + sta_flags |= STA_FLG_MIMO_DIS_MSK; + break; + case WLAN_HT_CAP_SM_PS_DYNAMIC: + sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK; + break; + case WLAN_HT_CAP_SM_PS_DISABLED: + break; + default: + IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode); + break; + } + + sta_flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS); + + sta_flags |= cpu_to_le32( + (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS); + + if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap)) + sta_flags |= STA_FLG_HT40_EN_MSK; + else + sta_flags &= ~STA_FLG_HT40_EN_MSK; + + priv->stations[index].sta.station_flags = sta_flags; + done: + return; +} + +/** + * iwl_legacy_prep_station - Prepare station information for addition + * + * should be called with sta_lock held + */ +u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, struct ieee80211_sta *sta) +{ + struct iwl_station_entry *station; + int i; + u8 sta_id = IWL_INVALID_STATION; + u16 rate; + + if (is_ap) + sta_id = ctx->ap_sta_id; + else if (is_broadcast_ether_addr(addr)) + sta_id = ctx->bcast_sta_id; + else + for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { + if (!compare_ether_addr(priv->stations[i].sta.sta.addr, + addr)) { + sta_id = i; + break; + } + + if (!priv->stations[i].used && + sta_id == IWL_INVALID_STATION) + sta_id = i; + } + + /* + * These two conditions have the same outcome, but keep them + * separate + */ + if (unlikely(sta_id == IWL_INVALID_STATION)) + return sta_id; + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { + IWL_DEBUG_INFO(priv, + "STA %d already in process of being added.\n", + sta_id); + return sta_id; + } + + if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && + (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) && + !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { + IWL_DEBUG_ASSOC(priv, + "STA %d (%pM) already added, not adding again.\n", + sta_id, addr); + return sta_id; + } + + station = &priv->stations[sta_id]; + station->used = IWL_STA_DRIVER_ACTIVE; + IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", + sta_id, addr); + priv->num_stations++; + + /* Set up the REPLY_ADD_STA command to send to device */ + memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd)); + memcpy(station->sta.sta.addr, addr, ETH_ALEN); + station->sta.mode = 0; + station->sta.sta.sta_id = sta_id; + station->sta.station_flags = ctx->station_flags; + station->ctxid = ctx->ctxid; + + if (sta) { + struct iwl_station_priv_common *sta_priv; + + sta_priv = (void *)sta->drv_priv; + sta_priv->ctx = ctx; + } + + /* + * OK to call unconditionally, since local stations (IBSS BSSID + * STA and broadcast STA) pass in a NULL sta, and mac80211 + * doesn't allow HT IBSS. + */ + iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx); + + /* 3945 only */ + rate = (priv->band == IEEE80211_BAND_5GHZ) ? + IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP; + /* Turn on both antennas for the station... 
*/ + station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); + + return sta_id; + +} +EXPORT_SYMBOL_GPL(iwl_legacy_prep_station); + +#define STA_WAIT_TIMEOUT (HZ/2) + +/** + * iwl_legacy_add_station_common - + */ +int +iwl_legacy_add_station_common(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r) +{ + unsigned long flags_spin; + int ret = 0; + u8 sta_id; + struct iwl_legacy_addsta_cmd sta_cmd; + + *sta_id_r = 0; + spin_lock_irqsave(&priv->sta_lock, flags_spin); + sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta); + if (sta_id == IWL_INVALID_STATION) { + IWL_ERR(priv, "Unable to prepare station %pM for addition\n", + addr); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EINVAL; + } + + /* + * uCode is not able to deal with multiple requests to add a + * station. Keep track if one is in progress so that we do not send + * another. + */ + if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { + IWL_DEBUG_INFO(priv, + "STA %d already in process of being added.\n", + sta_id); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EEXIST; + } + + if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) && + (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_ASSOC(priv, + "STA %d (%pM) already added, not adding again.\n", + sta_id, addr); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EEXIST; + } + + priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; + memcpy(&sta_cmd, &priv->stations[sta_id].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + /* Add station to device's station table */ + ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); + if (ret) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[sta_id].sta.sta.addr); + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + } + *sta_id_r = sta_id; + return ret; +} +EXPORT_SYMBOL(iwl_legacy_add_station_common); + +/** + * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station + * + * priv->sta_lock must be held + */ +static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) +{ + /* Ucode must be active and driver must be non active */ + if ((priv->stations[sta_id].used & + (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != + IWL_STA_UCODE_ACTIVE) + IWL_ERR(priv, "removed non active STA %u\n", sta_id); + + priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; + + memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); + IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id); +} + +static int iwl_legacy_send_remove_station(struct iwl_priv *priv, + const u8 *addr, int sta_id, + bool temporary) +{ + struct iwl_rx_packet *pkt; + int ret; + + unsigned long flags_spin; + struct iwl_rem_sta_cmd rm_sta_cmd; + + struct iwl_host_cmd cmd = { + .id = REPLY_REMOVE_STA, + .len = sizeof(struct iwl_rem_sta_cmd), + .flags = CMD_SYNC, + .data = &rm_sta_cmd, + }; + + memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); + rm_sta_cmd.num_sta = 1; + memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN); + + cmd.flags |= CMD_WANT_SKB; + + ret = iwl_legacy_send_cmd(priv, &cmd); + + if (ret) + return ret; + + pkt = (struct iwl_rx_packet *)cmd.reply_page; + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { + IWL_ERR(priv, "Bad return from 
REPLY_REMOVE_STA (0x%08X)\n", + pkt->hdr.flags); + ret = -EIO; + } + + if (!ret) { + switch (pkt->u.rem_sta.status) { + case REM_STA_SUCCESS_MSK: + if (!temporary) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + iwl_legacy_sta_ucode_deactivate(priv, sta_id); + spin_unlock_irqrestore(&priv->sta_lock, + flags_spin); + } + IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); + break; + default: + ret = -EIO; + IWL_ERR(priv, "REPLY_REMOVE_STA failed\n"); + break; + } + } + iwl_legacy_free_pages(priv, cmd.reply_page); + + return ret; +} + +/** + * iwl_legacy_remove_station - Remove driver's knowledge of station. + */ +int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id, + const u8 *addr) +{ + unsigned long flags; + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Unable to remove station %pM, device not ready.\n", + addr); + /* + * It is typical for stations to be removed when we are + * going down. Return success since device will be down + * soon anyway + */ + return 0; + } + + IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", + sta_id, addr); + + if (WARN_ON(sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + spin_lock_irqsave(&priv->sta_lock, flags); + + if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", + addr); + goto out_err; + } + + if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n", + addr); + goto out_err; + } + + if (priv->stations[sta_id].used & IWL_STA_LOCAL) { + kfree(priv->stations[sta_id].lq); + priv->stations[sta_id].lq = NULL; + } + + priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + + priv->num_stations--; + + BUG_ON(priv->num_stations < 0); + + spin_unlock_irqrestore(&priv->sta_lock, flags); + + return iwl_legacy_send_remove_station(priv, addr, sta_id, false); +out_err: + spin_unlock_irqrestore(&priv->sta_lock, flags); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(iwl_legacy_remove_station); + +/** + * iwl_legacy_clear_ucode_stations - clear ucode station table bits + * + * This function clears all the bits in the driver indicating + * which stations are active in the ucode. Call when something + * other than explicit station management would cause this in + * the ucode, e.g. unassociated RXON. + */ +void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx) +{ + int i; + unsigned long flags_spin; + bool cleared = false; + + IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (ctx && ctx->ctxid != priv->stations[i].ctxid) + continue; + + if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) { + IWL_DEBUG_INFO(priv, + "Clearing ucode active for station %d\n", i); + priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; + cleared = true; + } + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + if (!cleared) + IWL_DEBUG_INFO(priv, + "No active stations found to be cleared\n"); +} +EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations); + +/** + * iwl_legacy_restore_stations() - Restore driver known stations to device + * + * All stations considered active by driver, but not present in ucode, is + * restored. + * + * Function sleeps. 
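A station qualifies for restoration exactly when the driver still marks it active but the uCode no longer does. A minimal sketch of that predicate, using the IWL_STA_* bit values from iwl-sta.h added further below in this patch:

#include <stdbool.h>
#include <stdio.h>

/* Bit values copied from iwl-sta.h below. */
#define IWL_STA_DRIVER_ACTIVE	(1 << 0)
#define IWL_STA_UCODE_ACTIVE	(1 << 1)

/* Condition used by the restore loop: driver-active but not uCode-active. */
static bool needs_restore(unsigned int used)
{
	return (used & IWL_STA_DRIVER_ACTIVE) &&
	       !(used & IWL_STA_UCODE_ACTIVE);
}

int main(void)
{
	printf("%d\n", needs_restore(IWL_STA_DRIVER_ACTIVE));		/* 1 */
	printf("%d\n", needs_restore(IWL_STA_DRIVER_ACTIVE |
				     IWL_STA_UCODE_ACTIVE));		/* 0 */
	return 0;
}
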
+ */ +void +iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) +{ + struct iwl_legacy_addsta_cmd sta_cmd; + struct iwl_link_quality_cmd lq; + unsigned long flags_spin; + int i; + bool found = false; + int ret; + bool send_lq; + + if (!iwl_legacy_is_ready(priv)) { + IWL_DEBUG_INFO(priv, + "Not ready yet, not restoring any stations.\n"); + return; + } + + IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (ctx->ctxid != priv->stations[i].ctxid) + continue; + if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && + !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) { + IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n", + priv->stations[i].sta.sta.addr); + priv->stations[i].sta.mode = 0; + priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS; + found = true; + } + } + + for (i = 0; i < priv->hw_params.max_stations; i++) { + if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { + memcpy(&sta_cmd, &priv->stations[i].sta, + sizeof(struct iwl_legacy_addsta_cmd)); + send_lq = false; + if (priv->stations[i].lq) { + memcpy(&lq, priv->stations[i].lq, + sizeof(struct iwl_link_quality_cmd)); + send_lq = true; + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); + if (ret) { + spin_lock_irqsave(&priv->sta_lock, flags_spin); + IWL_ERR(priv, "Adding station %pM failed.\n", + priv->stations[i].sta.sta.addr); + priv->stations[i].used &= + ~IWL_STA_DRIVER_ACTIVE; + priv->stations[i].used &= + ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, + flags_spin); + } + /* + * Rate scaling has already been initialized, send + * current LQ command + */ + if (send_lq) + iwl_legacy_send_lq_cmd(priv, ctx, &lq, + CMD_SYNC, true); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; + } + } + + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + if (!found) + IWL_DEBUG_INFO(priv, "Restoring all known stations" + " .... no stations to be restored.\n"); + else + IWL_DEBUG_INFO(priv, "Restoring all known stations" + " .... 
complete.\n"); +} +EXPORT_SYMBOL(iwl_legacy_restore_stations); + +int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv) +{ + int i; + + for (i = 0; i < priv->sta_key_max_num; i++) + if (!test_and_set_bit(i, &priv->ucode_key_table)) + return i; + + return WEP_INVALID_OFFSET; +} +EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index); + +void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->sta_lock, flags); + for (i = 0; i < priv->hw_params.max_stations; i++) { + if (!(priv->stations[i].used & IWL_STA_BCAST)) + continue; + + priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE; + priv->num_stations--; + BUG_ON(priv->num_stations < 0); + kfree(priv->stations[i].lq); + priv->stations[i].lq = NULL; + } + spin_unlock_irqrestore(&priv->sta_lock, flags); +} +EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ + int i; + IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id); + IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n", + lq->general_params.single_stream_ant_msk, + lq->general_params.dual_stream_ant_msk); + + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) + IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n", + i, lq->rs_table[i].rate_n_flags); +} +#else +static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv, + struct iwl_link_quality_cmd *lq) +{ +} +#endif + +/** + * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity + * + * It sometimes happens when a HT rate has been in use and we + * loose connectivity with AP then mac80211 will first tell us that the + * current channel is not HT anymore before removing the station. In such a + * scenario the RXON flags will be updated to indicate we are not + * communicating HT anymore, but the LQ command may still contain HT rates. + * Test for this to prevent driver from sending LQ command between the time + * RXON flags are updated and when LQ command is updated. + */ +static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq) +{ + int i; + + if (ctx->ht.enabled) + return true; + + IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n", + ctx->active.channel); + for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { + if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & + RATE_MCS_HT_MSK) { + IWL_DEBUG_INFO(priv, + "index %d of LQ expects HT channel\n", + i); + return false; + } + } + return true; +} + +/** + * iwl_legacy_send_lq_cmd() - Send link quality command + * @init: This command is sent as part of station initialization right + * after station has been added. + * + * The link quality command is sent as the last step of station creation. + * This is the special case in which init is set and we call a callback in + * this case to clear the state indicating that station creation is in + * progress. 
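iwl_legacy_get_free_ucode_key_index() above hands out hardware key slots by scanning a bitmap with test_and_set_bit(), so finding and reserving a slot happen in one step. A user-space sketch of the same first-free-bit idea (non-atomic and purely illustrative; KEY_SLOTS and the WEP_INVALID_OFFSET value are stand-ins for priv->sta_key_max_num and the driver's real sentinel):

#include <stdio.h>

#define KEY_SLOTS		8	/* stands in for priv->sta_key_max_num */
#define WEP_INVALID_OFFSET	255	/* stand-in sentinel value */

static unsigned long key_table;		/* stands in for priv->ucode_key_table */

/* First-free-bit scan; the driver does this atomically with test_and_set_bit(). */
static int get_free_key_index(void)
{
	int i;

	for (i = 0; i < KEY_SLOTS; i++) {
		if (!(key_table & (1UL << i))) {
			key_table |= 1UL << i;	/* reserve the slot */
			return i;
		}
	}
	return WEP_INVALID_OFFSET;
}

int main(void)
{
	int first = get_free_key_index();
	int second = get_free_key_index();

	printf("%d %d\n", first, second);	/* 0 1 */
	return 0;
}
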
+ */ +int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq, u8 flags, bool init) +{ + int ret = 0; + unsigned long flags_spin; + + struct iwl_host_cmd cmd = { + .id = REPLY_TX_LINK_QUALITY_CMD, + .len = sizeof(struct iwl_link_quality_cmd), + .flags = flags, + .data = lq, + }; + + if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) + return -EINVAL; + + + spin_lock_irqsave(&priv->sta_lock, flags_spin); + if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + return -EINVAL; + } + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + + iwl_legacy_dump_lq_cmd(priv, lq); + BUG_ON(init && (cmd.flags & CMD_ASYNC)); + + if (iwl_legacy_is_lq_table_valid(priv, ctx, lq)) + ret = iwl_legacy_send_cmd(priv, &cmd); + else + ret = -EINVAL; + + if (cmd.flags & CMD_ASYNC) + return ret; + + if (init) { + IWL_DEBUG_INFO(priv, "init LQ command complete," + " clearing sta addition status for sta %d\n", + lq->sta_id); + spin_lock_irqsave(&priv->sta_lock, flags_spin); + priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; + spin_unlock_irqrestore(&priv->sta_lock, flags_spin); + } + return ret; +} +EXPORT_SYMBOL(iwl_legacy_send_lq_cmd); + +int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv; + int ret; + + IWL_DEBUG_INFO(priv, "received request to remove station %pM\n", + sta->addr); + mutex_lock(&priv->mutex); + IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", + sta->addr); + ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr); + if (ret) + IWL_ERR(priv, "Error removing station %pM\n", + sta->addr); + mutex_unlock(&priv->mutex); + return ret; +} +EXPORT_SYMBOL(iwl_legacy_mac_sta_remove); diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h new file mode 100644 index 000000000000..67bd75fe01a1 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-sta.h @@ -0,0 +1,148 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ +#ifndef __iwl_legacy_sta_h__ +#define __iwl_legacy_sta_h__ + +#include "iwl-dev.h" + +#define HW_KEY_DYNAMIC 0 +#define HW_KEY_DEFAULT 1 + +#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */ +#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */ +#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of + being activated */ +#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211; + (this is for the IBSS BSSID stations) */ +#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */ + + +void iwl_legacy_restore_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv, + struct iwl_rxon_context *ctx); +void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv); +int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv); +int iwl_legacy_send_add_sta(struct iwl_priv *priv, + struct iwl_legacy_addsta_cmd *sta, u8 flags); +int iwl_legacy_add_station_common(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta, u8 *sta_id_r); +int iwl_legacy_remove_station(struct iwl_priv *priv, + const u8 sta_id, + const u8 *addr); +int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +u8 iwl_legacy_prep_station(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + const u8 *addr, bool is_ap, + struct ieee80211_sta *sta); + +int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_link_quality_cmd *lq, + u8 flags, bool init); + +/** + * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver + * @priv: iwl priv struct + * + * This is called during iwl_down() to make sure that in the case + * we're coming there from a hardware restart mac80211 will be + * able to reconfigure stations -- if we're getting there in the + * normal down flow then the stations will already be cleared. + */ +static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv) +{ + unsigned long flags; + struct iwl_rxon_context *ctx; + + spin_lock_irqsave(&priv->sta_lock, flags); + memset(priv->stations, 0, sizeof(priv->stations)); + priv->num_stations = 0; + + priv->ucode_key_table = 0; + + for_each_context(priv, ctx) { + /* + * Remove all key information that is not stored as part + * of station information since mac80211 may not have had + * a chance to remove all the keys. When device is + * reconfigured by mac80211 after an error all keys will + * be reconfigured. + */ + memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys)); + ctx->key_mapping_keys = 0; + } + + spin_unlock_irqrestore(&priv->sta_lock, flags); +} + +static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta) +{ + if (WARN_ON(!sta)) + return IWL_INVALID_STATION; + + return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id; +} + +/** + * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta + * @priv: iwl priv + * @context: the current context + * @sta: mac80211 station + * + * In certain circumstances mac80211 passes a station pointer + * that may be %NULL, for example during TX or key setup. In + * that case, we need to use the broadcast station, so this + * inline wraps that pattern. 
+ */ +static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv, + struct iwl_rxon_context *context, + struct ieee80211_sta *sta) +{ + int sta_id; + + if (!sta) + return context->bcast_sta_id; + + sta_id = iwl_legacy_sta_id(sta); + + /* + * mac80211 should not be passing a partially + * initialised station! + */ + WARN_ON(sta_id == IWL_INVALID_STATION); + + return sta_id; +} +#endif /* __iwl_legacy_sta_h__ */ diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c new file mode 100644 index 000000000000..a227773cb384 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c @@ -0,0 +1,660 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/etherdevice.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <net/mac80211.h> +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-sta.h" +#include "iwl-io.h" +#include "iwl-helpers.h" + +/** + * iwl_legacy_txq_update_write_ptr - Send new write index to hardware + */ +void +iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + u32 reg = 0; + int txq_id = txq->q.id; + + if (txq->need_update == 0) + return; + + /* if we're trying to save power */ + if (test_bit(STATUS_POWER_PMI, &priv->status)) { + /* wake up nic if it's powered down ... + * uCode will wake up, and interrupt us again, so next + * time we'll skip this part. */ + reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); + + if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { + IWL_DEBUG_INFO(priv, + "Tx queue %d requesting wakeup," + " GP1 = 0x%x\n", txq_id, reg); + iwl_legacy_set_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + return; + } + + iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); + + /* + * else not in power-save mode, + * uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). 
+ */ + } else + iwl_write32(priv, HBUS_TARG_WRPTR, + txq->q.write_ptr | (txq_id << 8)); + txq->need_update = 0; +} +EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr); + +/** + * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's + */ +void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + + if (q->n_bd == 0) + return; + + while (q->write_ptr != q->read_ptr) { + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd); + } +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap); + +/** + * iwl_legacy_tx_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct device *dev = &priv->pci_dev->dev; + int i; + + iwl_legacy_tx_queue_unmap(priv, txq_id); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i < TFD_TX_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, priv->hw_params.tfd_size * + txq->q.n_bd, txq->tfds, txq->q.dma_addr); + + /* De-alloc array of per-TFD driver data */ + kfree(txq->txb); + txq->txb = NULL; + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_free); + +/** + * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue + */ +void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct iwl_queue *q = &txq->q; + bool huge = false; + int i; + + if (q->n_bd == 0) + return; + + while (q->read_ptr != q->write_ptr) { + /* we have no way to tell if it is a huge cmd ATM */ + i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0); + + if (txq->meta[i].flags & CMD_SIZE_HUGE) + huge = true; + else + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd); + } + + if (huge) { + i = q->n_window; + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); + } +} +EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap); + +/** + * iwl_legacy_cmd_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. 
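Both unmap helpers above drain the ring the same way: walk read_ptr toward write_ptr, releasing one slot per step and wrapping at n_bd. A stand-alone sketch of that walk (the per-slot unmap is reduced to a counter, and inc_wrap() is a stand-in for iwl_legacy_queue_inc_wrap() from iwl-helpers.h, which is not part of this hunk):

#include <stdio.h>

#define N_BD	8	/* illustrative ring size */

/* Wraps at n_bd; stands in for iwl_legacy_queue_inc_wrap(). */
static int inc_wrap(int index, int n_bd)
{
	return (index + 1 == n_bd) ? 0 : index + 1;
}

int main(void)
{
	int read_ptr = 6, write_ptr = 2;	/* the ring has wrapped */
	int released = 0;

	while (read_ptr != write_ptr) {
		/* the driver unmaps/frees the slot at read_ptr here */
		released++;
		read_ptr = inc_wrap(read_ptr, N_BD);
	}
	printf("released %d slots\n", released);	/* 4: slots 6, 7, 0, 1 */
	return 0;
}
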
+ */ +void iwl_legacy_cmd_queue_free(struct iwl_priv *priv) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct device *dev = &priv->pci_dev->dev; + int i; + + iwl_legacy_cmd_queue_unmap(priv); + + /* De-alloc array of command/tx buffers */ + for (i = 0; i <= TFD_CMD_SLOTS; i++) + kfree(txq->cmd[i]); + + /* De-alloc circular buffer of TFDs */ + if (txq->q.n_bd) + dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd, + txq->tfds, txq->q.dma_addr); + + /* deallocate arrays */ + kfree(txq->cmd); + kfree(txq->meta); + txq->cmd = NULL; + txq->meta = NULL; + + /* 0-fill queue descriptor structure */ + memset(txq, 0, sizeof(*txq)); +} +EXPORT_SYMBOL(iwl_legacy_cmd_queue_free); + +/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** + * DMA services + * + * Theory of operation + * + * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer + * of buffer descriptors, each of which points to one or more data buffers for + * the device to read from or fill. Driver and device exchange status of each + * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty + * entries in each circular buffer, to protect against confusing empty and full + * queue states. + * + * The device reads or writes the data in the queues via the device's several + * DMA/FIFO channels. Each queue is mapped to a single DMA channel. + * + * For Tx queue, there are low mark and high mark limits. If, after queuing + * the packet for Tx, free space become < low mark, Tx queue stopped. When + * reclaiming packets (on 'tx done IRQ), if free space become > high mark, + * Tx queue resumed. + * + * See more detailed info in iwl-4965-hw.h. + ***************************************************/ + +int iwl_legacy_queue_space(const struct iwl_queue *q) +{ + int s = q->read_ptr - q->write_ptr; + + if (q->read_ptr > q->write_ptr) + s -= q->n_bd; + + if (s <= 0) + s += q->n_window; + /* keep some reserve to not confuse empty and full situations */ + s -= 2; + if (s < 0) + s = 0; + return s; +} +EXPORT_SYMBOL(iwl_legacy_queue_space); + + +/** + * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes + */ +static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q, + int count, int slots_num, u32 id) +{ + q->n_bd = count; + q->n_window = slots_num; + q->id = id; + + /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap + * and iwl_legacy_queue_dec_wrap are broken. */ + BUG_ON(!is_power_of_2(count)); + + /* slots_num must be power-of-two size, otherwise + * iwl_legacy_get_cmd_index is broken. */ + BUG_ON(!is_power_of_2(slots_num)); + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->write_ptr = q->read_ptr = 0; + + return 0; +} + +/** + * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue + */ +static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv, + struct iwl_tx_queue *txq, u32 id) +{ + struct device *dev = &priv->pci_dev->dev; + size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; + + /* Driver private data, only for Tx (not command) queues, + * not shared with device. 
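+ *
+ * [Editor's note, not part of the original commit: txb[] holds one
+ *  struct iwl_tx_info per TFD -- driver-side bookkeeping such as the skb
+ *  attached to that TFD -- so the frame can be reclaimed on tx-done. The
+ *  command queue tracks its buffers through the cmd[]/meta[] arrays
+ *  instead, which is why txb is left NULL for it just below.]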
*/ + if (id != priv->cmd_queue) { + txq->txb = kzalloc(sizeof(txq->txb[0]) * + TFD_QUEUE_SIZE_MAX, GFP_KERNEL); + if (!txq->txb) { + IWL_ERR(priv, "kmalloc for auxiliary BD " + "structures failed\n"); + goto error; + } + } else { + txq->txb = NULL; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device */ + txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, + GFP_KERNEL); + if (!txq->tfds) { + IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz); + goto error; + } + txq->q.id = id; + + return 0; + + error: + kfree(txq->txb); + txq->txb = NULL; + + return -ENOMEM; +} + +/** + * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue + */ +int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int i, len; + int ret; + int actual_slots = slots_num; + + /* + * Alloc buffer array for commands (Tx or other types of commands). + * For the command queue (#4/#9), allocate command space + one big + * command for scan, since scan command is very huge; the system will + * not have two scans at the same time, so only one is needed. + * For normal Tx queues (all other queues), no super-size command + * space is needed. + */ + if (txq_id == priv->cmd_queue) + actual_slots++; + + txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, + GFP_KERNEL); + txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots, + GFP_KERNEL); + + if (!txq->meta || !txq->cmd) + goto out_free_arrays; + + len = sizeof(struct iwl_device_cmd); + for (i = 0; i < actual_slots; i++) { + /* only happens for cmd queue */ + if (i == slots_num) + len = IWL_MAX_CMD_SIZE; + + txq->cmd[i] = kmalloc(len, GFP_KERNEL); + if (!txq->cmd[i]) + goto err; + } + + /* Alloc driver data array and TFD circular buffer */ + ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id); + if (ret) + goto err; + + txq->need_update = 0; + + /* + * For the default queues 0-3, set up the swq_id + * already -- all others need to get one later + * (if they need one at all). + */ + if (txq_id < 4) + iwl_legacy_set_swq_id(txq, txq_id, txq_id); + + /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. 
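+ *
+ * [Editor's note, not part of the original commit: the BUILD_BUG_ON just
+ *  below enforces this for TFD_QUEUE_SIZE_MAX at compile time, while the
+ *  matching requirement on slots_num is checked at run time by the
+ *  BUG_ON(!is_power_of_2(slots_num)) in iwl_legacy_queue_init() above.]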
*/ + BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + iwl_legacy_queue_init(priv, &txq->q, + TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + priv->cfg->ops->lib->txq_init(priv, txq); + + return 0; +err: + for (i = 0; i < actual_slots; i++) + kfree(txq->cmd[i]); +out_free_arrays: + kfree(txq->meta); + kfree(txq->cmd); + + return -ENOMEM; +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_init); + +void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, + int slots_num, u32 txq_id) +{ + int actual_slots = slots_num; + + if (txq_id == priv->cmd_queue) + actual_slots++; + + memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots); + + txq->need_update = 0; + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + iwl_legacy_queue_init(priv, &txq->q, + TFD_QUEUE_SIZE_MAX, slots_num, txq_id); + + /* Tell device where to find queue */ + priv->cfg->ops->lib->txq_init(priv, txq); +} +EXPORT_SYMBOL(iwl_legacy_tx_queue_reset); + +/*************** HOST COMMAND QUEUE FUNCTIONS *****/ + +/** + * iwl_legacy_enqueue_hcmd - enqueue a uCode command + * @priv: device private data point + * @cmd: a point to the ucode command structure + * + * The function returns < 0 values to indicate the operation is + * failed. On success, it turns the index (> 0) of command in the + * command queue. + */ +int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct iwl_queue *q = &txq->q; + struct iwl_device_cmd *out_cmd; + struct iwl_cmd_meta *out_meta; + dma_addr_t phys_addr; + unsigned long flags; + int len; + u32 idx; + u16 fix_size; + + cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); + fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + + /* If any of the command structures end up being larger than + * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then + * we will need to increase the size of the TFD entries + * Also, check to see if command buffer should not exceed the size + * of device_cmd and max_cmd_size. */ + BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && + !(cmd->flags & CMD_SIZE_HUGE)); + BUG_ON(fix_size > IWL_MAX_CMD_SIZE); + + if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) { + IWL_WARN(priv, "Not sending command - %s KILL\n", + iwl_legacy_is_rfkill(priv) ? "RF" : "CT"); + return -EIO; + } + + if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + IWL_ERR(priv, "No space in command queue\n"); + IWL_ERR(priv, "Restarting adapter due to queue full\n"); + queue_work(priv->workqueue, &priv->restart); + return -ENOSPC; + } + + spin_lock_irqsave(&priv->hcmd_lock, flags); + + /* If this is a huge cmd, mark the huge flag also on the meta.flags + * of the _original_ cmd. This is used for DMA mapping clean up. 
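+ *
+ * [Editor's note, not part of the original commit: the payload of a
+ *  CMD_SIZE_HUGE command (the scan command) is written into the spare slot
+ *  selected when iwl_legacy_get_cmd_index() is called with its is_huge
+ *  argument set, roughly:
+ *      iwl_legacy_get_cmd_index(q, q->write_ptr, 0) -> q->write_ptr & (q->n_window - 1)
+ *      iwl_legacy_get_cmd_index(q, q->write_ptr, 1) -> q->n_window   (spare slot)
+ *  while the flag itself is recorded at the plain write_ptr index, so that
+ *  iwl_legacy_cmd_queue_unmap() and iwl_legacy_tx_cmd_complete() can later
+ *  tell that the mapping to undo is the one at index q->n_window.]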
+ */ + if (cmd->flags & CMD_SIZE_HUGE) { + idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0); + txq->meta[idx].flags = CMD_SIZE_HUGE; + } + + idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); + out_cmd = txq->cmd[idx]; + out_meta = &txq->meta[idx]; + + memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + out_meta->flags = cmd->flags; + if (cmd->flags & CMD_WANT_SKB) + out_meta->source = cmd; + if (cmd->flags & CMD_ASYNC) + out_meta->callback = cmd->callback; + + out_cmd->hdr.cmd = cmd->id; + memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); + + /* At this point, the out_cmd now has all of the incoming cmd + * information */ + + out_cmd->hdr.flags = 0; + out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) | + INDEX_TO_SEQ(q->write_ptr)); + if (cmd->flags & CMD_SIZE_HUGE) + out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; + len = sizeof(struct iwl_device_cmd); + if (idx == TFD_CMD_SLOTS) + len = IWL_MAX_CMD_SIZE; + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + switch (out_cmd->hdr.cmd) { + case REPLY_TX_LINK_QUALITY_CMD: + case SENSITIVITY_CMD: + IWL_DEBUG_HC_DUMP(priv, + "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + iwl_legacy_get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, priv->cmd_queue); + break; + default: + IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + iwl_legacy_get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), fix_size, + q->write_ptr, idx, priv->cmd_queue); + } +#endif + txq->need_update = 1; + + if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl) + /* Set up entry in queue's byte count circular buffer */ + priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0); + + phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, + fix_size, PCI_DMA_BIDIRECTIONAL); + dma_unmap_addr_set(out_meta, mapping, phys_addr); + dma_unmap_len_set(out_meta, len, fix_size); + + trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr, + fix_size, cmd->flags); + + priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, + phys_addr, fix_size, 1, + U32_PAD(cmd->len)); + + /* Increment and update queue's write index */ + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); + + spin_unlock_irqrestore(&priv->hcmd_lock, flags); + return idx; +} + +/** + * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd + * + * When FW advances 'R' index, all entries between old and new 'R' index + * need to be reclaimed. As result, some free space forms. If there is + * enough free space (> low mark), wake the stack that feeds us. 
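+ *
+ * [Editor's note, not part of the original commit: on the command queue
+ *  each host command occupies exactly one entry, so reclaiming the entry
+ *  at 'idx' should advance read_ptr by a single step; if the loop below
+ *  finds it has to free more than one entry it logs "HCMD skipped" and
+ *  schedules a restart of the adapter.]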
+ */ +static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, + int idx, int cmd_idx) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + int nfreed = 0; + + if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) { + IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, " + "is out of range [0-%d] %d %d.\n", txq_id, + idx, q->n_bd, q->write_ptr, q->read_ptr); + return; + } + + for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; + q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) { + + if (nfreed++ > 0) { + IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx, + q->write_ptr, q->read_ptr); + queue_work(priv->workqueue, &priv->restart); + } + + } +} + +/** + * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them + * @rxb: Rx buffer to reclaim + * + * If an Rx buffer has an async callback associated with it the callback + * will be executed. The attached skb (if present) will only be freed + * if the callback returns 1 + */ +void +iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u16 sequence = le16_to_cpu(pkt->hdr.sequence); + int txq_id = SEQ_TO_QUEUE(sequence); + int index = SEQ_TO_INDEX(sequence); + int cmd_index; + bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); + struct iwl_device_cmd *cmd; + struct iwl_cmd_meta *meta; + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + + /* If a Tx command is being handled and it isn't in the actual + * command queue then there a command routing bug has been introduced + * in the queue management code. */ + if (WARN(txq_id != priv->cmd_queue, + "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", + txq_id, priv->cmd_queue, sequence, + priv->txq[priv->cmd_queue].q.read_ptr, + priv->txq[priv->cmd_queue].q.write_ptr)) { + iwl_print_hex_error(priv, pkt, 32); + return; + } + + /* If this is a huge cmd, clear the huge flag on the meta.flags + * of the _original_ cmd. So that iwl_legacy_cmd_queue_free won't unmap + * the DMA buffer for the scan (huge) command. + */ + if (huge) { + cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, 0); + txq->meta[cmd_index].flags = 0; + } + cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge); + cmd = txq->cmd[cmd_index]; + meta = &txq->meta[cmd_index]; + + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(meta, mapping), + dma_unmap_len(meta, len), + PCI_DMA_BIDIRECTIONAL); + + /* Input error checking is done when commands are added to queue. 
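+ *
+ * [Editor's note, not part of the original commit: below, a CMD_WANT_SKB
+ *  command hands the response page to the waiting caller through
+ *  meta->source->reply_page, an asynchronous command gets its callback
+ *  invoked, and a synchronous command clears STATUS_HCMD_ACTIVE and wakes
+ *  wait_command_queue, presumably where iwl_legacy_send_cmd_sync() is
+ *  blocked.]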
*/ + if (meta->flags & CMD_WANT_SKB) { + meta->source->reply_page = (unsigned long)rxb_addr(rxb); + rxb->page = NULL; + } else if (meta->callback) + meta->callback(priv, cmd, pkt); + + iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); + + if (!(meta->flags & CMD_ASYNC)) { + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); + IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", + iwl_legacy_get_cmd_string(cmd->hdr.cmd)); + wake_up_interruptible(&priv->wait_command_queue); + } + meta->flags = 0; +} +EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete); diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 371abbf60eac..ab87e1b73529 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -61,7 +61,6 @@ #include "iwl-helpers.h" #include "iwl-dev.h" #include "iwl-spectrum.h" -#include "iwl-legacy.h" /* * module name, copyright, version, etc. @@ -70,7 +69,7 @@ #define DRV_DESCRIPTION \ "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG #define VD "d" #else #define VD @@ -82,7 +81,7 @@ * this was configurable. */ #define DRV_VERSION IWLWIFI_VERSION VD "s" -#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" +#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" #define DRV_AUTHOR "<ilw@linux.intel.com>" MODULE_DESCRIPTION(DRV_DESCRIPTION); @@ -164,7 +163,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC) priv->stations[sta_id].sta.key.key_offset = - iwl_get_free_ucode_key_index(priv); + iwl_legacy_get_free_ucode_key_index(priv); /* else, we are overriding an existing key => no need to allocated room * in uCode. 
*/ @@ -177,7 +176,8 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); - ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); + ret = iwl_legacy_send_add_sta(priv, + &priv->stations[sta_id].sta, CMD_ASYNC); spin_unlock_irqrestore(&priv->sta_lock, flags); @@ -201,7 +201,7 @@ static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) { unsigned long flags; - struct iwl_addsta_cmd sta_cmd; + struct iwl_legacy_addsta_cmd sta_cmd; spin_lock_irqsave(&priv->sta_lock, flags); memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); @@ -210,11 +210,11 @@ static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; - memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); + memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd)); spin_unlock_irqrestore(&priv->sta_lock, flags); IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); - return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); + return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); } static int iwl3945_set_dynamic_key(struct iwl_priv *priv, @@ -318,7 +318,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, int left) { - if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) + if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) return 0; if (priv->beacon_skb->len > left) @@ -344,12 +344,12 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) return -ENOMEM; } - rate = iwl_rate_get_lowest_plcp(priv, + rate = iwl_legacy_get_lowest_plcp(priv, &priv->contexts[IWL_RXON_CTX_BSS]); frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); - rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, &frame->u.cmd[0]); iwl3945_free_frame(priv, frame); @@ -443,7 +443,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } - priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); + iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); if (ieee80211_is_mgmt(fc)) { @@ -485,7 +485,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) unsigned long flags; spin_lock_irqsave(&priv->lock, flags); - if (iwl_is_rfkill(priv)) { + if (iwl_legacy_is_rfkill(priv)) { IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); goto drop_unlock; } @@ -500,7 +500,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) fc = hdr->frame_control; -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG if (ieee80211_is_auth(fc)) IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); else if (ieee80211_is_assoc_req(fc)) @@ -514,7 +514,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) hdr_len = ieee80211_hdrlen(fc); /* Find index into station table for destination station */ - sta_id = iwl_sta_id_or_broadcast( + sta_id = iwl_legacy_sta_id_or_broadcast( priv, &priv->contexts[IWL_RXON_CTX_BSS], info->control.sta); if (sta_id == IWL_INVALID_STATION) { @@ -536,12 +536,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) txq = 
&priv->txq[txq_id]; q = &txq->q; - if ((iwl_queue_space(q) < q->high_mark)) + if ((iwl_legacy_queue_space(q) < q->high_mark)) goto drop; spin_lock_irqsave(&priv->lock, flags); - idx = get_cmd_index(q, q->write_ptr, 0); + idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0); /* Set up driver data for this TFD */ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); @@ -582,8 +582,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) len = (u16)skb->len; tx_cmd->len = cpu_to_le16(len); - iwl_dbg_log_tx_data_frame(priv, len, hdr); - iwl_update_stats(priv, true, fc, len); + iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); + iwl_legacy_update_stats(priv, true, fc, len); tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; @@ -642,20 +642,20 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* Tell device the write index *just past* this latest filled TFD */ - q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); - iwl_txq_update_write_ptr(priv, txq); + q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); + iwl_legacy_txq_update_write_ptr(priv, txq); spin_unlock_irqrestore(&priv->lock, flags); - if ((iwl_queue_space(q) < q->high_mark) + if ((iwl_legacy_queue_space(q) < q->high_mark) && priv->mac80211_registered) { if (wait_write_ptr) { spin_lock_irqsave(&priv->lock, flags); txq->need_update = 1; - iwl_txq_update_write_ptr(priv, txq); + iwl_legacy_txq_update_write_ptr(priv, txq); spin_unlock_irqrestore(&priv->lock, flags); } - iwl_stop_queue(priv, txq); + iwl_legacy_stop_queue(priv, txq); } return 0; @@ -683,8 +683,8 @@ static int iwl3945_get_measurement(struct iwl_priv *priv, int duration = le16_to_cpu(params->duration); struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) - add_time = iwl_usecs_to_beacons(priv, + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) + add_time = iwl_legacy_usecs_to_beacons(priv, le64_to_cpu(params->start_time) - priv->_3945.last_tsf, le16_to_cpu(ctx->timing.beacon_interval)); @@ -697,9 +697,9 @@ static int iwl3945_get_measurement(struct iwl_priv *priv, cmd.len = sizeof(spectrum); spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); - if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) spectrum.start_time = - iwl_add_beacon_time(priv, + iwl_legacy_add_beacon_time(priv, priv->_3945.last_beacon_time, add_time, le16_to_cpu(ctx->timing.beacon_interval)); else @@ -712,7 +712,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv, spectrum.flags |= RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; - rc = iwl_send_cmd_sync(priv, &cmd); + rc = iwl_legacy_send_cmd_sync(priv, &cmd); if (rc) return rc; @@ -739,7 +739,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv, break; } - iwl_free_pages(priv, cmd.reply_page); + iwl_legacy_free_pages(priv, cmd.reply_page); return rc; } @@ -783,45 +783,19 @@ static void iwl3945_rx_reply_alive(struct iwl_priv *priv, static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG struct iwl_rx_packet *pkt = rxb_addr(rxb); #endif IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); } -static void iwl3945_bg_beacon_update(struct work_struct *work) -{ - struct iwl_priv *priv = - container_of(work, struct iwl_priv, beacon_update); - struct sk_buff *beacon; - - /* 
Pull updated AP beacon from mac80211. will fail if not in AP mode */ - beacon = ieee80211_beacon_get(priv->hw, - priv->contexts[IWL_RXON_CTX_BSS].vif); - - if (!beacon) { - IWL_ERR(priv, "update beacon failed\n"); - return; - } - - mutex_lock(&priv->mutex); - /* new beacon skb is allocated every time; dispose previous.*/ - if (priv->beacon_skb) - dev_kfree_skb(priv->beacon_skb); - - priv->beacon_skb = beacon; - mutex_unlock(&priv->mutex); - - iwl3945_send_beacon_cmd(priv); -} - static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG u8 rate = beacon->beacon_notify_hdr.rate; IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " @@ -835,9 +809,6 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); - if ((priv->iw_mode == NL80211_IFTYPE_AP) && - (!test_bit(STATUS_EXIT_PENDING, &priv->status))) - queue_work(priv->workqueue, &priv->beacon_update); } /* Handle notification from uCode that card's power state is changing @@ -862,7 +833,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, clear_bit(STATUS_RF_KILL_HW, &priv->status); - iwl_scan_cancel(priv); + iwl_legacy_scan_cancel(priv); if ((test_bit(STATUS_RF_KILL_HW, &status) != test_bit(STATUS_RF_KILL_HW, &priv->status))) @@ -885,13 +856,13 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) { priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; - priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; - priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = - iwl_rx_spectrum_measure_notif; - priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + iwl_legacy_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = - iwl_rx_pm_debug_statistics_notif; + iwl_legacy_rx_pm_debug_statistics_notif; priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; /* @@ -902,7 +873,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; - iwl_setup_rx_scan_handlers(priv); + iwl_legacy_setup_rx_scan_handlers(priv); priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; /* Set up hardware specific Rx handlers */ @@ -1003,7 +974,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv) spin_lock_irqsave(&rxq->lock, flags); write = rxq->write & ~0x7; - while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { + while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { /* Get next free Rx buffer, remove from free list */ element = rxq->rx_free.next; rxb = list_entry(element, struct iwl_rx_mem_buffer, list); @@ -1029,7 +1000,7 @@ static void iwl3945_rx_queue_restock(struct iwl_priv *priv) spin_lock_irqsave(&rxq->lock, flags); rxq->need_update = 1; spin_unlock_irqrestore(&rxq->lock, flags); - iwl_rx_queue_update_write_ptr(priv, rxq); + iwl_legacy_rx_queue_update_write_ptr(priv, rxq); } } @@ -1123,7 +1094,7 @@ void 
iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, PAGE_SIZE << priv->hw_params.rx_page_order, PCI_DMA_FROMDEVICE); - __iwl_free_pages(priv, rxq->pool[i].page); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); rxq->pool[i].page = NULL; } list_add_tail(&rxq->pool[i].list, &rxq->rx_used); @@ -1170,7 +1141,7 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, PAGE_SIZE << priv->hw_params.rx_page_order, PCI_DMA_FROMDEVICE); - __iwl_free_pages(priv, rxq->pool[i].page); + __iwl_legacy_free_pages(priv, rxq->pool[i].page); rxq->pool[i].page = NULL; } } @@ -1275,7 +1246,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv) len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; len += sizeof(u32); /* account for status word */ - trace_iwlwifi_dev_rx(priv, pkt, len); + trace_iwlwifi_legacy_dev_rx(priv, pkt, len); /* Reclaim a command buffer only if this packet is a response * to a (driver-originated) command. @@ -1292,14 +1263,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv) * rx_handlers table. See iwl3945_setup_rx_handlers() */ if (priv->rx_handlers[pkt->hdr.cmd]) { IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, - get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); + iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); } else { /* No handling needed */ IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n", - r, i, get_cmd_string(pkt->hdr.cmd), + r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); } @@ -1312,10 +1283,10 @@ static void iwl3945_rx_handle(struct iwl_priv *priv) if (reclaim) { /* Invoke any callbacks, transfer the buffer to caller, - * and fire off the (possibly) blocking iwl_send_cmd() + * and fire off the (possibly) blocking iwl_legacy_send_cmd() * as we reclaim the driver command queue */ if (rxb->page) - iwl_tx_cmd_complete(priv, rxb); + iwl_legacy_tx_cmd_complete(priv, rxb); else IWL_WARN(priv, "Claim null rxb?\n"); } @@ -1357,14 +1328,14 @@ static void iwl3945_rx_handle(struct iwl_priv *priv) } /* call this function to flush any scheduled tasklet */ -static inline void iwl_synchronize_irq(struct iwl_priv *priv) +static inline void iwl3945_synchronize_irq(struct iwl_priv *priv) { /* wait to make sure we flush pending tasklet*/ synchronize_irq(priv->pci_dev->irq); tasklet_kill(&priv->irq_tasklet); } -static const char *desc_lookup(int i) +static const char *iwl3945_desc_lookup(int i) { switch (i) { case 1: @@ -1401,7 +1372,7 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv) } - count = iwl_read_targ_mem(priv, base); + count = iwl_legacy_read_targ_mem(priv, base); if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { IWL_ERR(priv, "Start IWL Error Log Dump:\n"); @@ -1414,25 +1385,25 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv) for (i = ERROR_START_OFFSET; i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; i += ERROR_ELEM_SIZE) { - desc = iwl_read_targ_mem(priv, base + i); + desc = iwl_legacy_read_targ_mem(priv, base + i); time = - iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32)); + iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32)); blink1 = - iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32)); + iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32)); blink2 = - iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32)); + iwl_legacy_read_targ_mem(priv, base 
+ i + 3 * sizeof(u32)); ilink1 = - iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32)); + iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32)); ilink2 = - iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32)); + iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32)); data1 = - iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); + iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32)); IWL_ERR(priv, "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", - desc_lookup(desc), desc, time, blink1, blink2, + iwl3945_desc_lookup(desc), desc, time, blink1, blink2, ilink1, ilink2, data1); - trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, + trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0, 0, blink1, blink2, ilink1, ilink2); } } @@ -1471,14 +1442,14 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, iwl_grab_nic_access(priv); /* Set starting address; reads will auto-increment */ - _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); rmb(); /* "time" is actually "data" for mode 0 (no timestamp). * place event id # at far right for easier visual parsing. */ for (i = 0; i < num_events; i++) { - ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); - time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); if (mode == 0) { /* data, ev */ if (bufsz) { @@ -1487,11 +1458,12 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, time, ev); } else { IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); - trace_iwlwifi_dev_ucode_event(priv, 0, + trace_iwlwifi_legacy_dev_ucode_event(priv, 0, time, ev); } } else { - data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + data = _iwl_legacy_read_direct32(priv, + HBUS_TARG_MEM_RDAT); if (bufsz) { pos += scnprintf(*buf + pos, bufsz - pos, "%010u:0x%08x:%04u\n", @@ -1499,7 +1471,7 @@ static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, } else { IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev); - trace_iwlwifi_dev_ucode_event(priv, time, + trace_iwlwifi_legacy_dev_ucode_event(priv, time, data, ev); } } @@ -1570,10 +1542,10 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, } /* event log header */ - capacity = iwl_read_targ_mem(priv, base); - mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); - num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); - next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); + capacity = iwl_legacy_read_targ_mem(priv, base); + mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32))); if (capacity > priv->cfg->base_params->max_event_log_size) { IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", @@ -1595,8 +1567,8 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, return pos; } -#ifdef CONFIG_IWLWIFI_DEBUG - if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) ? 
DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; #else @@ -1607,7 +1579,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n", size); -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG if (display) { if (full_log) bufsz = capacity * 48; @@ -1617,7 +1589,7 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, if (!*buf) return -ENOMEM; } - if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { + if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { /* if uCode has wrapped back to top of log, * start at the oldest entry, * i.e the next one that uCode would fill. @@ -1647,7 +1619,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv) u32 inta, handled = 0; u32 inta_fh; unsigned long flags; -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG u32 inta_mask; #endif @@ -1665,8 +1637,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv) inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_get_debug_level(priv) & IWL_DL_ISR) { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { /* just for debug */ inta_mask = iwl_read32(priv, CSR_INT_MASK); IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", @@ -1690,18 +1662,18 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv) IWL_ERR(priv, "Hardware error detected. Restarting.\n"); /* Tell the device to stop sending interrupts */ - iwl_disable_interrupts(priv); + iwl_legacy_disable_interrupts(priv); priv->isr_stats.hw++; - iwl_irq_handle_error(priv); + iwl_legacy_irq_handle_error(priv); handled |= CSR_INT_BIT_HW_ERR; return; } -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { /* NIC fires this, but we don't use it, redundant with WAKEUP */ if (inta & CSR_INT_BIT_SCD) { IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " @@ -1724,20 +1696,20 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv) IWL_ERR(priv, "Microcode SW error detected. 
" "Restarting 0x%X.\n", inta); priv->isr_stats.sw++; - iwl_irq_handle_error(priv); + iwl_legacy_irq_handle_error(priv); handled |= CSR_INT_BIT_SW_ERR; } /* uCode wakes up after power-down sleep */ if (inta & CSR_INT_BIT_WAKEUP) { IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); - iwl_rx_queue_update_write_ptr(priv, &priv->rxq); - iwl_txq_update_write_ptr(priv, &priv->txq[0]); - iwl_txq_update_write_ptr(priv, &priv->txq[1]); - iwl_txq_update_write_ptr(priv, &priv->txq[2]); - iwl_txq_update_write_ptr(priv, &priv->txq[3]); - iwl_txq_update_write_ptr(priv, &priv->txq[4]); - iwl_txq_update_write_ptr(priv, &priv->txq[5]); + iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]); + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]); priv->isr_stats.wakeup++; handled |= CSR_INT_BIT_WAKEUP; @@ -1757,7 +1729,7 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv) priv->isr_stats.tx++; iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); - iwl_write_direct32(priv, FH39_TCSR_CREDIT + iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT (FH39_SRVC_CHNL), 0x0); handled |= CSR_INT_BIT_FH_TX; } @@ -1776,10 +1748,10 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv) /* Re-enable all interrupts */ /* only Re-enable if disabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) - iwl_enable_interrupts(priv); + iwl_legacy_enable_interrupts(priv); -#ifdef CONFIG_IWLWIFI_DEBUG - if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { inta = iwl_read32(priv, CSR_INT); inta_mask = iwl_read32(priv, CSR_INT_MASK); inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); @@ -1806,14 +1778,14 @@ static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv, return added; } - active_dwell = iwl_get_active_dwell_time(priv, band, 0); - passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); if (passive_dwell <= active_dwell) passive_dwell = active_dwell + 1; - channel = iwl_get_single_channel_number(priv, band); + channel = iwl_legacy_get_single_channel_number(priv, band); if (channel) { scan_ch->channel = channel; @@ -1849,8 +1821,8 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, if (!sband) return 0; - active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); - passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); + active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); + passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); if (passive_dwell <= active_dwell) passive_dwell = active_dwell + 1; @@ -1863,10 +1835,12 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, scan_ch->channel = chan->hw_value; - ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); - if (!is_channel_valid(ch_info)) { - IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", - scan_ch->channel); + ch_info = iwl_legacy_get_channel_info(priv, band, + scan_ch->channel); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_SCAN(priv, + "Channel %d is INVALID for this band.\n", + scan_ch->channel); continue; } @@ -1875,7 +1849,7 @@ static int 
iwl3945_get_channels_for_scan(struct iwl_priv *priv, /* If passive , set up for auto-switch * and use long active_dwell time. */ - if (!is_active || is_channel_passive(ch_info) || + if (!is_active || iwl_legacy_is_channel_passive(ch_info) || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { scan_ch->type = 0; /* passive */ if (IWL_UCODE_API(priv->ucode_ver) == 1) @@ -1955,12 +1929,12 @@ static void iwl3945_init_hw_rates(struct iwl_priv *priv, static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) { - iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); - iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); - iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); - iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); - iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); - iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); } /** @@ -1976,7 +1950,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); - iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, IWL39_RTC_INST_LOWER_BOUND); errcnt = 0; @@ -1984,7 +1958,7 @@ static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 le /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IWL_DL_IO is set */ - val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { IWL_ERR(priv, "uCode INST section is invalid at " "offset 0x%x, is 0x%x, s/b 0x%x\n", @@ -2023,9 +1997,9 @@ static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 /* read data comes through single port, auto-incr addr */ /* NOTE: Use the debugless read so we don't flood kernel log * if IWL_DL_IO is set */ - iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, + iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, i + IWL39_RTC_INST_LOWER_BOUND); - val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); + val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); if (val != le32_to_cpu(*image)) { #if 0 /* Enable this if you want to see details */ IWL_ERR(priv, "uCode INST section is invalid at " @@ -2101,7 +2075,7 @@ static void iwl3945_nic_start(struct iwl_priv *priv) #define IWL3945_UCODE_GET(item) \ static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ { \ - return le32_to_cpu(ucode->u.v1.item); \ + return le32_to_cpu(ucode->v1.item); \ } static u32 iwl3945_ucode_get_header_size(u32 api_ver) @@ -2111,7 +2085,7 @@ static u32 iwl3945_ucode_get_header_size(u32 api_ver) static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) { - return (u8 *) ucode->u.v1.data; + return (u8 *) ucode->v1.data; } IWL3945_UCODE_GET(inst_size); @@ -2286,13 +2260,13 @@ static int iwl3945_read_ucode(struct iwl_priv *priv) * 1) unmodified from disk * 2) backup cache for save/restore during power-downs */ priv->ucode_code.len = inst_size; - iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); priv->ucode_data.len 
= data_size; - iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); priv->ucode_data_backup.len = data_size; - iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || !priv->ucode_data_backup.v_addr) @@ -2301,10 +2275,10 @@ static int iwl3945_read_ucode(struct iwl_priv *priv) /* Initialization instructions and data */ if (init_size && init_data_size) { priv->ucode_init.len = init_size; - iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); priv->ucode_init_data.len = init_data_size; - iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) goto err_pci_alloc; @@ -2313,7 +2287,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv) /* Bootstrap (instructions only, no data) */ if (boot_size) { priv->ucode_boot.len = boot_size; - iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); if (!priv->ucode_boot.v_addr) goto err_pci_alloc; @@ -2400,14 +2374,14 @@ static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv) pdata = priv->ucode_data_backup.p_addr; /* Tell bootstrap uCode where to find image to load */ - iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); - iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); - iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, + iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); + iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, priv->ucode_data.len); /* Inst byte count must be last to set up, bit 31 signals uCode * that all new ptr/size info is in place */ - iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, + iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, priv->ucode_code.len | BSM_DRAM_INST_LOAD); IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); @@ -2488,7 +2462,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv) goto restart; } - rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); + rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG); IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); if (rfkill & 0x1) { @@ -2510,18 +2484,18 @@ static void iwl3945_alive_start(struct iwl_priv *priv) set_bit(STATUS_ALIVE, &priv->status); /* Enable watchdog to monitor the driver tx queues */ - iwl_setup_watchdog(priv); + iwl_legacy_setup_watchdog(priv); - if (iwl_is_rfkill(priv)) + if (iwl_legacy_is_rfkill(priv)) return; ieee80211_wake_queues(priv->hw); - priv->active_rate = IWL_RATES_MASK; + priv->active_rate = IWL_RATES_MASK_3945; - iwl_power_update_mode(priv, true); + iwl_legacy_power_update_mode(priv, true); - if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { struct iwl3945_rxon_cmd *active_rxon = (struct iwl3945_rxon_cmd *)(&ctx->active); @@ -2529,21 +2503,20 @@ static void iwl3945_alive_start(struct iwl_priv *priv) active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; } else { /* Initialize our rx_config data */ - iwl_connection_init_rx_config(priv, ctx); + iwl_legacy_connection_init_rx_config(priv, ctx); } /* Configure Bluetooth device coexistence support */ - priv->cfg->ops->hcmd->send_bt_config(priv); + iwl_legacy_send_bt_config(priv); + + set_bit(STATUS_READY, 
&priv->status); /* Configure the adapter for unassociated operation */ iwl3945_commit_rxon(priv, ctx); iwl3945_reg_txpower_periodic(priv); - iwl_leds_init(priv); - IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); - set_bit(STATUS_READY, &priv->status); wake_up_interruptible(&priv->wait_command_queue); return; @@ -2561,7 +2534,7 @@ static void __iwl3945_down(struct iwl_priv *priv) IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); - iwl_scan_cancel_timeout(priv, 200); + iwl_legacy_scan_cancel_timeout(priv, 200); exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); @@ -2570,9 +2543,9 @@ static void __iwl3945_down(struct iwl_priv *priv) del_timer_sync(&priv->watchdog); /* Station information will now be cleared in device */ - iwl_clear_ucode_stations(priv, NULL); - iwl_dealloc_bcast_stations(priv); - iwl_clear_driver_stations(priv); + iwl_legacy_clear_ucode_stations(priv, NULL); + iwl_legacy_dealloc_bcast_stations(priv); + iwl_legacy_clear_driver_stations(priv); /* Unblock any waiting calls */ wake_up_interruptible_all(&priv->wait_command_queue); @@ -2587,16 +2560,16 @@ static void __iwl3945_down(struct iwl_priv *priv) /* tell the device to stop sending interrupts */ spin_lock_irqsave(&priv->lock, flags); - iwl_disable_interrupts(priv); + iwl_legacy_disable_interrupts(priv); spin_unlock_irqrestore(&priv->lock, flags); - iwl_synchronize_irq(priv); + iwl3945_synchronize_irq(priv); if (priv->mac80211_registered) ieee80211_stop_queues(priv->hw); /* If we have not previously called iwl3945_init() then * clear all bits but the RF Kill bits and return */ - if (!iwl_is_init(priv)) { + if (!iwl_legacy_is_init(priv)) { priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << STATUS_RF_KILL_HW | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << @@ -2621,11 +2594,11 @@ static void __iwl3945_down(struct iwl_priv *priv) iwl3945_hw_rxq_stop(priv); /* Power-down device's busmaster DMA clocks */ - iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); udelay(5); /* Stop the device, and put it in low power state */ - iwl_apm_stop(priv); + iwl_legacy_apm_stop(priv); exit: memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); @@ -2656,7 +2629,8 @@ static int iwl3945_alloc_bcast_station(struct iwl_priv *priv) u8 sta_id; spin_lock_irqsave(&priv->sta_lock, flags); - sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL); + sta_id = iwl_legacy_prep_station(priv, ctx, + iwlegacy_bcast_addr, false, NULL); if (sta_id == IWL_INVALID_STATION) { IWL_ERR(priv, "Unable to prepare broadcast station\n"); spin_unlock_irqrestore(&priv->sta_lock, flags); @@ -2714,7 +2688,7 @@ static int __iwl3945_up(struct iwl_priv *priv) /* clear (again), then enable host interrupts */ iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - iwl_enable_interrupts(priv); + iwl_legacy_enable_interrupts(priv); /* really make sure rfkill handshake bits are cleared */ iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); @@ -2856,21 +2830,18 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; scan->quiet_time = IWL_ACTIVE_QUIET_TIME; - if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { + if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { u16 interval = 0; u32 extra; u32 suspend_time = 100; u32 scan_suspend_time = 100; - unsigned long flags; IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); - spin_lock_irqsave(&priv->lock, flags); if 
(priv->is_internal_short_scan) interval = 0; else interval = vif->bss_conf.beacon_int; - spin_unlock_irqrestore(&priv->lock, flags); scan->suspend_time = 0; scan->max_out_time = cpu_to_le32(200 * 1024); @@ -2947,7 +2918,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) if (!priv->is_internal_short_scan) { scan->tx_cmd.len = cpu_to_le16( - iwl_fill_probe_req(priv, + iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, vif->addr, priv->scan_request->ie, @@ -2956,9 +2927,9 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) } else { /* use bcast addr, will not be transmitted but must be valid */ scan->tx_cmd.len = cpu_to_le16( - iwl_fill_probe_req(priv, + iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, - iwl_bcast_addr, NULL, 0, + iwlegacy_bcast_addr, NULL, 0, IWL_MAX_SCAN_SIZE - sizeof(*scan))); } /* select Rx antennas */ @@ -2986,7 +2957,7 @@ int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->len = cpu_to_le16(cmd.len); set_bit(STATUS_SCAN_HW, &priv->status); - ret = iwl_send_cmd_sync(priv, &cmd); + ret = iwl_legacy_send_cmd_sync(priv, &cmd); if (ret) clear_bit(STATUS_SCAN_HW, &priv->status); return ret; @@ -3054,25 +3025,20 @@ void iwl3945_post_associate(struct iwl_priv *priv) if (!ctx->vif || !priv->is_open) return; - if (ctx->vif->type == NL80211_IFTYPE_AP) { - IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); - return; - } - IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", ctx->vif->bss_conf.aid, ctx->active.bssid_addr); if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; - iwl_scan_cancel_timeout(priv, 200); + iwl_legacy_scan_cancel_timeout(priv, 200); - conf = ieee80211_get_hw_conf(priv->hw); + conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; iwl3945_commit_rxon(priv, ctx); - rc = iwl_send_rxon_timing(priv, ctx); + rc = iwl_legacy_send_rxon_timing(priv, ctx); if (rc) IWL_WARN(priv, "REPLY_RXON_TIMING failed - " "Attempting to continue.\n"); @@ -3170,8 +3136,6 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw) * no need to poll the killswitch state anymore */ cancel_delayed_work(&priv->_3945.rfkill_poll); - iwl_led_start(priv); - priv->is_open = 1; IWL_DEBUG_MAC80211(priv, "leave\n"); return 0; @@ -3206,7 +3170,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw) IWL_DEBUG_MAC80211(priv, "leave\n"); } -static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct iwl_priv *priv = hw->priv; @@ -3219,7 +3183,6 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb_any(skb); IWL_DEBUG_MAC80211(priv, "leave\n"); - return NETDEV_TX_OK; } void iwl3945_config_ap(struct iwl_priv *priv) @@ -3232,14 +3195,14 @@ void iwl3945_config_ap(struct iwl_priv *priv) return; /* The following should be done only at AP bring up */ - if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) { + if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) { /* RXON - unassoc (to set timing command) */ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; iwl3945_commit_rxon(priv, ctx); /* RXON Timing */ - rc = iwl_send_rxon_timing(priv, ctx); + rc = iwl_legacy_send_rxon_timing(priv, ctx); if (rc) IWL_WARN(priv, "REPLY_RXON_TIMING failed - " "Attempting to continue.\n"); @@ -3266,10 +3229,6 @@ void iwl3945_config_ap(struct iwl_priv *priv) iwl3945_commit_rxon(priv, ctx); } 
iwl3945_send_beacon_cmd(priv); - - /* FIXME - we need to add code here to detect a totally new - * configuration, reset the AP, unassoc, rxon timing, assoc, - * clear sta table, add BCAST sta... */ } static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, @@ -3289,17 +3248,25 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS); + /* + * To support IBSS RSN, don't program group keys in IBSS, the + * hardware will then not attempt to decrypt the frames. + */ + if (vif->type == NL80211_IFTYPE_ADHOC && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS); if (!static_key) { - sta_id = iwl_sta_id_or_broadcast( + sta_id = iwl_legacy_sta_id_or_broadcast( priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); if (sta_id == IWL_INVALID_STATION) return -EINVAL; } mutex_lock(&priv->mutex); - iwl_scan_cancel_timeout(priv, 100); + iwl_legacy_scan_cancel_timeout(priv, 100); switch (cmd) { case SET_KEY: @@ -3344,7 +3311,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, sta_priv->common.sta_id = IWL_INVALID_STATION; - ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS], + ret = iwl_legacy_add_station_common(priv, + &priv->contexts[IWL_RXON_CTX_BSS], sta->addr, is_ap, sta, &sta_id); if (ret) { IWL_ERR(priv, "Unable to add station %pM (%d)\n", @@ -3405,7 +3373,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw, /* * Receiving all multicast frames is always enabled by the - * default flags setup in iwl_connection_init_rx_config() + * default flags setup in iwl_legacy_connection_init_rx_config() * since we currently do not support programming multicast * filters into the device. */ @@ -3420,7 +3388,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw, * *****************************************************************************/ -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG /* * The following adds a new attribute to the sysfs representation @@ -3433,13 +3401,13 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw, * level that is used instead of the global debug level if it (the per * device debug level) is set. 
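 *
 * [Editor's note, not part of the original commit: the store handler below
 *  accepts the mask in hex or decimal form, so a per-device debug level can
 *  be set with something along the lines of
 *      echo 0x47ffffff > /sys/.../debug_level
 *  where both the exact sysfs path and the mask value are only
 *  illustrative.]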
*/ -static ssize_t show_debug_level(struct device *d, +static ssize_t iwl3945_show_debug_level(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); - return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); + return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); } -static ssize_t store_debug_level(struct device *d, +static ssize_t iwl3945_store_debug_level(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -3452,7 +3420,7 @@ static ssize_t store_debug_level(struct device *d, IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); else { priv->debug_level = val; - if (iwl_alloc_traffic_mem(priv)) + if (iwl_legacy_alloc_traffic_mem(priv)) IWL_ERR(priv, "Not enough memory to generate traffic log\n"); } @@ -3460,31 +3428,31 @@ static ssize_t store_debug_level(struct device *d, } static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, - show_debug_level, store_debug_level); + iwl3945_show_debug_level, iwl3945_store_debug_level); -#endif /* CONFIG_IWLWIFI_DEBUG */ +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ -static ssize_t show_temperature(struct device *d, +static ssize_t iwl3945_show_temperature(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); - if (!iwl_is_alive(priv)) + if (!iwl_legacy_is_alive(priv)) return -EAGAIN; return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); } -static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); +static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL); -static ssize_t show_tx_power(struct device *d, +static ssize_t iwl3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); return sprintf(buf, "%d\n", priv->tx_power_user_lmt); } -static ssize_t store_tx_power(struct device *d, +static ssize_t iwl3945_store_tx_power(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -3501,9 +3469,9 @@ static ssize_t store_tx_power(struct device *d, return count; } -static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power); -static ssize_t show_flags(struct device *d, +static ssize_t iwl3945_show_flags(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); @@ -3512,7 +3480,7 @@ static ssize_t show_flags(struct device *d, return sprintf(buf, "0x%04X\n", ctx->active.flags); } -static ssize_t store_flags(struct device *d, +static ssize_t iwl3945_store_flags(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -3523,7 +3491,7 @@ static ssize_t store_flags(struct device *d, mutex_lock(&priv->mutex); if (le32_to_cpu(ctx->staging.flags) != flags) { /* Cancel any currently running scans... 
*/ - if (iwl_scan_cancel_timeout(priv, 100)) + if (iwl_legacy_scan_cancel_timeout(priv, 100)) IWL_WARN(priv, "Could not cancel scan.\n"); else { IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", @@ -3537,9 +3505,9 @@ static ssize_t store_flags(struct device *d, return count; } -static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); +static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags); -static ssize_t show_filter_flags(struct device *d, +static ssize_t iwl3945_show_filter_flags(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); @@ -3549,7 +3517,7 @@ static ssize_t show_filter_flags(struct device *d, le32_to_cpu(ctx->active.filter_flags)); } -static ssize_t store_filter_flags(struct device *d, +static ssize_t iwl3945_store_filter_flags(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -3560,7 +3528,7 @@ static ssize_t store_filter_flags(struct device *d, mutex_lock(&priv->mutex); if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { /* Cancel any currently running scans... */ - if (iwl_scan_cancel_timeout(priv, 100)) + if (iwl_legacy_scan_cancel_timeout(priv, 100)) IWL_WARN(priv, "Could not cancel scan.\n"); else { IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " @@ -3575,10 +3543,10 @@ static ssize_t store_filter_flags(struct device *d, return count; } -static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, - store_filter_flags); +static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags, + iwl3945_store_filter_flags); -static ssize_t show_measurement(struct device *d, +static ssize_t iwl3945_show_measurement(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); @@ -3610,7 +3578,7 @@ static ssize_t show_measurement(struct device *d, return len; } -static ssize_t store_measurement(struct device *d, +static ssize_t iwl3945_store_measurement(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -3647,9 +3615,9 @@ static ssize_t store_measurement(struct device *d, } static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, - show_measurement, store_measurement); + iwl3945_show_measurement, iwl3945_store_measurement); -static ssize_t store_retry_rate(struct device *d, +static ssize_t iwl3945_store_retry_rate(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -3662,38 +3630,38 @@ static ssize_t store_retry_rate(struct device *d, return count; } -static ssize_t show_retry_rate(struct device *d, +static ssize_t iwl3945_show_retry_rate(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); return sprintf(buf, "%d", priv->retry_rate); } -static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, - store_retry_rate); +static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate, + iwl3945_store_retry_rate); -static ssize_t show_channels(struct device *d, +static ssize_t iwl3945_show_channels(struct device *d, struct device_attribute *attr, char *buf) { /* all this shit doesn't belong into sysfs anyway */ return 0; } -static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); +static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL); -static ssize_t show_antenna(struct device *d, +static ssize_t iwl3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); 
- if (!iwl_is_alive(priv)) + if (!iwl_legacy_is_alive(priv)) return -EAGAIN; return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); } -static ssize_t store_antenna(struct device *d, +static ssize_t iwl3945_store_antenna(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -3718,20 +3686,20 @@ static ssize_t store_antenna(struct device *d, return count; } -static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); +static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna); -static ssize_t show_status(struct device *d, +static ssize_t iwl3945_show_status(struct device *d, struct device_attribute *attr, char *buf) { struct iwl_priv *priv = dev_get_drvdata(d); - if (!iwl_is_alive(priv)) + if (!iwl_legacy_is_alive(priv)) return -EAGAIN; return sprintf(buf, "0x%08x\n", (int)priv->status); } -static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); +static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL); -static ssize_t dump_error_log(struct device *d, +static ssize_t iwl3945_dump_error_log(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { @@ -3744,7 +3712,7 @@ static ssize_t dump_error_log(struct device *d, return strnlen(buf, count); } -static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); +static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log); /***************************************************************************** * @@ -3760,18 +3728,17 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv) INIT_WORK(&priv->restart, iwl3945_bg_restart); INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); - INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); - iwl_setup_scan_deferred_work(priv); + iwl_legacy_setup_scan_deferred_work(priv); iwl3945_hw_setup_deferred_work(priv); init_timer(&priv->watchdog); priv->watchdog.data = (unsigned long)priv; - priv->watchdog.function = iwl_bg_watchdog; + priv->watchdog.function = iwl_legacy_bg_watchdog; tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) iwl3945_irq_tasklet, (unsigned long)priv); @@ -3783,9 +3750,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv) cancel_delayed_work_sync(&priv->init_alive_start); cancel_delayed_work(&priv->alive_start); - cancel_work_sync(&priv->beacon_update); - iwl_cancel_scan_deferred_work(priv); + iwl_legacy_cancel_scan_deferred_work(priv); } static struct attribute *iwl3945_sysfs_entries[] = { @@ -3799,7 +3765,7 @@ static struct attribute *iwl3945_sysfs_entries[] = { &dev_attr_status.attr, &dev_attr_temperature.attr, &dev_attr_tx_power.attr, -#ifdef CONFIG_IWLWIFI_DEBUG +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG &dev_attr_debug_level.attr, #endif NULL @@ -3814,19 +3780,19 @@ struct ieee80211_ops iwl3945_hw_ops = { .tx = iwl3945_mac_tx, .start = iwl3945_mac_start, .stop = iwl3945_mac_stop, - .add_interface = iwl_mac_add_interface, - .remove_interface = iwl_mac_remove_interface, - .change_interface = iwl_mac_change_interface, + .add_interface = iwl_legacy_mac_add_interface, + .remove_interface = iwl_legacy_mac_remove_interface, + .change_interface = iwl_legacy_mac_change_interface, .config = iwl_legacy_mac_config, .configure_filter = iwl3945_configure_filter, .set_key = iwl3945_mac_set_key, - .conf_tx = iwl_mac_conf_tx, + .conf_tx = 
iwl_legacy_mac_conf_tx, .reset_tsf = iwl_legacy_mac_reset_tsf, .bss_info_changed = iwl_legacy_mac_bss_info_changed, - .hw_scan = iwl_mac_hw_scan, + .hw_scan = iwl_legacy_mac_hw_scan, .sta_add = iwl3945_mac_sta_add, - .sta_remove = iwl_mac_sta_remove, - .tx_last_beacon = iwl_mac_tx_last_beacon, + .sta_remove = iwl_legacy_mac_sta_remove, + .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, }; static int iwl3945_init_drv(struct iwl_priv *priv) @@ -3868,7 +3834,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv) ret = -EINVAL; goto err; } - ret = iwl_init_channel_map(priv); + ret = iwl_legacy_init_channel_map(priv); if (ret) { IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); goto err; @@ -3880,7 +3846,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv) goto err_free_channel_map; } - ret = iwlcore_init_geos(priv); + ret = iwl_legacy_init_geos(priv); if (ret) { IWL_ERR(priv, "initializing geos failed: %d\n", ret); goto err_free_channel_map; @@ -3890,7 +3856,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv) return 0; err_free_channel_map: - iwl_free_channel_map(priv); + iwl_legacy_free_channel_map(priv); err: return ret; } @@ -3910,15 +3876,12 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT; - if (!priv->cfg->base_params->broken_powersave) - hw->flags |= IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_SUPPORTS_DYNAMIC_PS; - hw->wiphy->interface_modes = priv->contexts[IWL_RXON_CTX_BSS].interface_modes; hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | - WIPHY_FLAG_DISABLE_BEACON_HINTS; + WIPHY_FLAG_DISABLE_BEACON_HINTS | + WIPHY_FLAG_IBSS_RSN; hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; /* we create the 802.11 header and a zero-length SSID element */ @@ -3935,6 +3898,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv) priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->bands[IEEE80211_BAND_5GHZ]; + iwl_legacy_leds_init(priv); + ret = ieee80211_register_hw(priv->hw); if (ret) { IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); @@ -3960,7 +3925,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e /* mac80211 allocates memory for this device instance, including * space for this driver's private structure */ - hw = iwl_alloc_all(cfg); + hw = iwl_legacy_alloc_all(cfg); if (hw == NULL) { pr_err("Can not allocate network device\n"); err = -ENOMEM; @@ -4000,13 +3965,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e iwl3945_hw_ops.hw_scan = NULL; } - IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); priv->cfg = cfg; priv->pci_dev = pdev; priv->inta_mask = CSR_INI_SET_MASK; - if (iwl_alloc_traffic_mem(priv)) + if (iwl_legacy_alloc_traffic_mem(priv)) IWL_ERR(priv, "Not enough memory to generate traffic log\n"); /*************************** @@ -4070,7 +4034,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e * ********************/ /* Read the EEPROM */ - err = iwl_eeprom_init(priv); + err = iwl_legacy_eeprom_init(priv); if (err) { IWL_ERR(priv, "Unable to init EEPROM\n"); goto out_iounmap; @@ -4107,12 +4071,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e * ********************/ spin_lock_irqsave(&priv->lock, flags); - iwl_disable_interrupts(priv); + iwl_legacy_disable_interrupts(priv); spin_unlock_irqrestore(&priv->lock, flags); pci_enable_msi(priv->pci_dev); - err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr, + err = 
request_irq(priv->pci_dev->irq, iwl_legacy_isr, IRQF_SHARED, DRV_NAME, priv); if (err) { IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); @@ -4125,24 +4089,24 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e goto out_release_irq; } - iwl_set_rxon_channel(priv, + iwl_legacy_set_rxon_channel(priv, &priv->bands[IEEE80211_BAND_2GHZ].channels[5], &priv->contexts[IWL_RXON_CTX_BSS]); iwl3945_setup_deferred_work(priv); iwl3945_setup_rx_handlers(priv); - iwl_power_initialize(priv); + iwl_legacy_power_initialize(priv); /********************************* * 8. Setup and Register mac80211 * *******************************/ - iwl_enable_interrupts(priv); + iwl_legacy_enable_interrupts(priv); err = iwl3945_setup_mac(priv); if (err) goto out_remove_sysfs; - err = iwl_dbgfs_register(priv, DRV_NAME); + err = iwl_legacy_dbgfs_register(priv, DRV_NAME); if (err) IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); @@ -4160,12 +4124,12 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e free_irq(priv->pci_dev->irq, priv); out_disable_msi: pci_disable_msi(priv->pci_dev); - iwlcore_free_geos(priv); - iwl_free_channel_map(priv); + iwl_legacy_free_geos(priv); + iwl_legacy_free_channel_map(priv); out_unset_hw_params: iwl3945_unset_hw_params(priv); out_eeprom_free: - iwl_eeprom_free(priv); + iwl_legacy_eeprom_free(priv); out_iounmap: pci_iounmap(pdev, priv->hw_base); out_pci_release_regions: @@ -4174,7 +4138,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); out_ieee80211_free_hw: - iwl_free_traffic_mem(priv); + iwl_legacy_free_traffic_mem(priv); ieee80211_free_hw(priv->hw); out: return err; @@ -4190,10 +4154,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); - iwl_dbgfs_unregister(priv); + iwl_legacy_dbgfs_unregister(priv); set_bit(STATUS_EXIT_PENDING, &priv->status); + iwl_legacy_leds_exit(priv); + if (priv->mac80211_registered) { ieee80211_unregister_hw(priv->hw); priv->mac80211_registered = 0; @@ -4208,16 +4174,16 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) * paths to avoid running iwl_down() at all before leaving driver. * This (inexpensive) call *makes sure* device is reset. */ - iwl_apm_stop(priv); + iwl_legacy_apm_stop(priv); /* make sure we flush any pending irq or * tasklet for the driver */ spin_lock_irqsave(&priv->lock, flags); - iwl_disable_interrupts(priv); + iwl_legacy_disable_interrupts(priv); spin_unlock_irqrestore(&priv->lock, flags); - iwl_synchronize_irq(priv); + iwl3945_synchronize_irq(priv); sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); @@ -4239,7 +4205,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) * until now... 
*/ destroy_workqueue(priv->workqueue); priv->workqueue = NULL; - iwl_free_traffic_mem(priv); + iwl_legacy_free_traffic_mem(priv); free_irq(pdev->irq, priv); pci_disable_msi(pdev); @@ -4249,8 +4215,8 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); - iwl_free_channel_map(priv); - iwlcore_free_geos(priv); + iwl_legacy_free_channel_map(priv); + iwl_legacy_free_geos(priv); kfree(priv->scan_cmd); if (priv->beacon_skb) dev_kfree_skb(priv->beacon_skb); @@ -4270,7 +4236,7 @@ static struct pci_driver iwl3945_driver = { .id_table = iwl3945_hw_card_ids, .probe = iwl3945_pci_probe, .remove = __devexit_p(iwl3945_pci_remove), - .driver.pm = IWL_PM_OPS, + .driver.pm = IWL_LEGACY_PM_OPS, }; static int __init iwl3945_init(void) @@ -4311,17 +4277,17 @@ module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO); MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); MODULE_PARM_DESC(swcrypto, - "using software crypto (default 1 [software])\n"); -#ifdef CONFIG_IWLWIFI_DEBUG -module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "debug output mask"); -#endif + "using software crypto (default 1 [software])"); module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, - int, S_IRUGO); + int, S_IRUGO); MODULE_PARM_DESC(disable_hw_scan, - "disable hardware scanning (default 0) (deprecated)"); -module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO); -MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); + "disable hardware scanning (default 0) (deprecated)"); +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif +module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); module_exit(iwl3945_exit); module_init(iwl3945_init); diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c new file mode 100644 index 000000000000..91b3d8b9d7a5 --- /dev/null +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c @@ -0,0 +1,3632 @@ +/****************************************************************************** + * + * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. + * + * Portions of this file are derived from the ipw3945 project, as well + * as portions of the ieee80211 subsystem header files. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/pci-aspm.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <linux/firmware.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> + +#include <net/mac80211.h> + +#include <asm/div64.h> + +#define DRV_NAME "iwl4965" + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-helpers.h" +#include "iwl-sta.h" +#include "iwl-4965-calib.h" +#include "iwl-4965.h" +#include "iwl-4965-led.h" + + +/****************************************************************************** + * + * module boiler plate + * + ******************************************************************************/ + +/* + * module name, copyright, version, etc. + */ +#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux" + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +#define VD "d" +#else +#define VD +#endif + +#define DRV_VERSION IWLWIFI_VERSION VD + + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_VERSION(DRV_VERSION); +MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("iwl4965"); + +void iwl4965_update_chain_flags(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + + if (priv->cfg->ops->hcmd->set_rxon_chain) { + for_each_context(priv, ctx) { + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + if (ctx->active.rx_chain != ctx->staging.rx_chain) + iwl_legacy_commit_rxon(priv, ctx); + } + } +} + +static void iwl4965_clear_free_frames(struct iwl_priv *priv) +{ + struct list_head *element; + + IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", + priv->frames_count); + + while (!list_empty(&priv->free_frames)) { + element = priv->free_frames.next; + list_del(element); + kfree(list_entry(element, struct iwl_frame, list)); + priv->frames_count--; + } + + if (priv->frames_count) { + IWL_WARN(priv, "%d frames still in use. 
Did we lose one?\n", + priv->frames_count); + priv->frames_count = 0; + } +} + +static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + struct list_head *element; + if (list_empty(&priv->free_frames)) { + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) { + IWL_ERR(priv, "Could not allocate frame!\n"); + return NULL; + } + + priv->frames_count++; + return frame; + } + + element = priv->free_frames.next; + list_del(element); + return list_entry(element, struct iwl_frame, list); +} + +static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) +{ + memset(frame, 0, sizeof(*frame)); + list_add(&frame->list, &priv->free_frames); +} + +static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + int left) +{ + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_skb) + return 0; + + if (priv->beacon_skb->len > left) + return 0; + + memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); + + return priv->beacon_skb->len; +} + +/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */ +static void iwl4965_set_beacon_tim(struct iwl_priv *priv, + struct iwl_tx_beacon_cmd *tx_beacon_cmd, + u8 *beacon, u32 frame_size) +{ + u16 tim_idx; + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon; + + /* + * The index is relative to frame start but we start looking at the + * variable-length part of the beacon. + */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx+1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) { + tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx); + tx_beacon_cmd->tim_size = beacon[tim_idx+1]; + } else + IWL_WARN(priv, "Unable to find TIM Element in beacon\n"); +} + +static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv, + struct iwl_frame *frame) +{ + struct iwl_tx_beacon_cmd *tx_beacon_cmd; + u32 frame_size; + u32 rate_flags; + u32 rate; + /* + * We have to set up the TX command, the TX Beacon command, and the + * beacon contents. 
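The TIM lookup in iwl4965_set_beacon_tim() above is a plain 802.11 information-element walk: starting at the variable-length part of the beacon, every element is a one-byte ID, a one-byte length and then the payload, so the cursor advances by beacon[idx + 1] + 2 until it either lands on WLAN_EID_TIM or runs off the frame. A minimal stand-alone sketch of the same walk, assuming the usual element ID value 5 for TIM and a made-up buffer:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define EID_TIM 5               /* assumed value of WLAN_EID_TIM */

/* Return the offset of the TIM element inside ies[0..len), or -1 if absent. */
static int find_tim(const uint8_t *ies, size_t len)
{
        size_t idx = 0;

        /* Each element: ID byte, length byte, then 'length' bytes of data. */
        while (idx + 2 <= len && ies[idx] != EID_TIM)
                idx += ies[idx + 1] + 2;

        return (idx + 2 <= len && ies[idx] == EID_TIM) ? (int)idx : -1;
}

int main(void)
{
        /* Hypothetical variable part: SSID "ab" (ID 0), then a TIM (ID 5). */
        const uint8_t ies[] = { 0, 2, 'a', 'b', 5, 4, 0, 1, 0, 0 };
        int off = find_tim(ies, sizeof(ies));

        if (off >= 0)
                printf("TIM at offset %d, length %u\n", off,
                       (unsigned)ies[off + 1]);
        return 0;
}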
+ */ + + lockdep_assert_held(&priv->mutex); + + if (!priv->beacon_ctx) { + IWL_ERR(priv, "trying to build beacon w/o beacon context!\n"); + return 0; + } + + /* Initialize memory */ + tx_beacon_cmd = &frame->u.beacon; + memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); + + /* Set up TX beacon contents */ + frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame, + sizeof(frame->u) - sizeof(*tx_beacon_cmd)); + if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE)) + return 0; + if (!frame_size) + return 0; + + /* Set up TX command fields */ + tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); + tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id; + tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; + tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | + TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; + + /* Set up TX beacon command fields */ + iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, + frame_size); + + /* Set up packet rate and flags */ + rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx); + priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant, + priv->hw_params.valid_tx_ant); + rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant); + if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE)) + rate_flags |= RATE_MCS_CCK_MSK; + tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, + rate_flags); + + return sizeof(*tx_beacon_cmd) + frame_size; +} + +int iwl4965_send_beacon_cmd(struct iwl_priv *priv) +{ + struct iwl_frame *frame; + unsigned int frame_size; + int rc; + + frame = iwl4965_get_free_frame(priv); + if (!frame) { + IWL_ERR(priv, "Could not obtain free frame buffer for beacon " + "command.\n"); + return -ENOMEM; + } + + frame_size = iwl4965_hw_get_beacon_cmd(priv, frame); + if (!frame_size) { + IWL_ERR(priv, "Error configuring the beacon command\n"); + iwl4965_free_frame(priv, frame); + return -EINVAL; + } + + rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, + &frame->u.cmd[0]); + + iwl4965_free_frame(priv, frame); + + return rc; +} + +static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + dma_addr_t addr = get_unaligned_le32(&tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + addr |= + ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; + + return addr; +} + +static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, + dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + hi_n_len |= ((addr >> 16) >> 16) & 0xF; + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + +/** + * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @priv - driver private data + * @txq - tx queue + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds; + struct iwl_tfd *tfd; + struct pci_dev *dev = priv->pci_dev; + int index = txq->q.read_ptr; + int i; + int 
num_tbs; + + tfd = &tfd_tmp[index]; + + /* Sanity check on number of chunks */ + num_tbs = iwl4965_tfd_get_num_tbs(tfd); + + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (num_tbs) + pci_unmap_single(dev, + dma_unmap_addr(&txq->meta[index], mapping), + dma_unmap_len(&txq->meta[index], len), + PCI_DMA_BIDIRECTIONAL); + + /* Unmap chunks, if any. */ + for (i = 1; i < num_tbs; i++) + pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i), + iwl4965_tfd_tb_get_len(tfd, i), + PCI_DMA_TODEVICE); + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; + } + } +} + +int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset, u8 pad) +{ + struct iwl_queue *q; + struct iwl_tfd *tfd, *tfd_tmp; + u32 num_tbs; + + q = &txq->q; + tfd_tmp = (struct iwl_tfd *)txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + num_tbs = iwl4965_tfd_get_num_tbs(tfd); + + /* Each TFD can point to a maximum 20 Tx buffers */ + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + IWL_NUM_OF_TBS); + return -EINVAL; + } + + BUG_ON(addr & ~DMA_BIT_MASK(36)); + if (unlikely(addr & ~IWL_TX_DMA_MASK)) + IWL_ERR(priv, "Unaligned address = %llx\n", + (unsigned long long)addr); + + iwl4965_tfd_set_tb(tfd, num_tbs, addr, len); + + return 0; +} + +/* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. + * + * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA + * channels supported in hardware. + */ +int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, + struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + /* Circular buffer (TFD queue in DRAM) physical base address */ + iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + + return 0; +} + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ +static void iwl4965_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... 
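Stepping back to the TFD accessors above: each transmit-buffer entry packs a DMA address of up to 36 bits and a 12-bit length into six bytes, with the low 32 address bits in tb->lo, the top four address bits in the low nibble of hi_n_len, and the length in its upper 12 bits. A stand-alone round-trip sketch of that packing, using plain host-endian integers where the driver uses le32/le16 accessors:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

struct tb {                     /* same packing idea as struct iwl_tfd_tb */
        uint32_t lo;            /* DMA address bits 0..31 */
        uint16_t hi_n_len;      /* bits 0..3: addr 32..35, bits 4..15: length */
};

static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
{
        tb->lo = (uint32_t)addr;
        tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t tb_get_addr(const struct tb *tb)
{
        return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_get_len(const struct tb *tb)
{
        return tb->hi_n_len >> 4;
}

int main(void)
{
        struct tb tb;
        uint64_t addr = 0xABCDE1234ULL;         /* any 36-bit address */

        tb_set(&tb, addr, 1500);
        assert(tb_get_addr(&tb) == addr && tb_get_len(&tb) == 1500);
        printf("addr 0x%llx len %u\n",
               (unsigned long long)tb_get_addr(&tb),
               (unsigned)tb_get_len(&tb));
        return 0;
}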
*/ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else + IWL_WARN(priv, "uCode did not respond OK.\n"); +} + +/** + * iwl4965_bg_statistics_periodic - Timer callback to queue statistics + * + * This callback is provided in order to send a statistics request. + * + * This timer function is continually reset to execute within + * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION + * was received. We need to ensure we receive the statistics in order + * to update the temperature used for calibrating the TXPOWER. + */ +static void iwl4965_bg_statistics_periodic(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + /* dont send host command if rf-kill is on */ + if (!iwl_legacy_is_ready_rf(priv)) + return; + + iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false); +} + + +static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base, + u32 start_idx, u32 num_events, + u32 mode) +{ + u32 i; + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (mode == 0) + ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32)); + else + ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + if (iwl_grab_nic_access(priv)) { + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return; + } + + /* Set starting address; reads will auto-increment */ + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* + * "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
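Laid out concretely, the uCode event log in SRAM is a four-word header (capacity, mode, num_wraps, next_entry) followed by fixed-size records: two 32-bit words per event without timestamps (mode 0), three with them. A hedged sketch of the address arithmetic used when reading it back; the base address here is invented:

#include <stdint.h>
#include <stdio.h>

#define EVT_HDR_WORDS 4u        /* capacity, mode, num_wraps, next_entry */

/* SRAM byte address of event record 'idx'; mode 0 = 2 words/event, else 3. */
static uint32_t event_ptr(uint32_t base, uint32_t idx, uint32_t mode)
{
        uint32_t words_per_event = (mode == 0) ? 2 : 3;

        return base + EVT_HDR_WORDS * sizeof(uint32_t) +
               idx * words_per_event * sizeof(uint32_t);
}

int main(void)
{
        uint32_t base = 0x800000;       /* hypothetical log base in SRAM */

        /* Record 10 starts 16 + 10 * 8 = 96 bytes past base in mode 0... */
        printf("mode 0: 0x%x\n", (unsigned)event_ptr(base, 10, 0));
        /* ...and 16 + 10 * 12 = 136 bytes past base with timestamps. */
        printf("mode 1: 0x%x\n", (unsigned)event_ptr(base, 10, 1));
        return 0;
}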
+ */ + for (i = 0; i < num_events; i++) { + ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + trace_iwlwifi_legacy_dev_ucode_cont_event(priv, + 0, time, ev); + } else { + data = _iwl_legacy_read_direct32(priv, + HBUS_TARG_MEM_RDAT); + trace_iwlwifi_legacy_dev_ucode_cont_event(priv, + time, data, ev); + } + } + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); +} + +static void iwl4965_continuous_event_trace(struct iwl_priv *priv) +{ + u32 capacity; /* event log capacity in # entries */ + u32 base; /* SRAM byte address of event log header */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + + if (priv->ucode_type == UCODE_INIT) + base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); + else + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + capacity = iwl_legacy_read_targ_mem(priv, base); + num_wraps = iwl_legacy_read_targ_mem(priv, + base + (2 * sizeof(u32))); + mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32))); + next_entry = iwl_legacy_read_targ_mem(priv, + base + (3 * sizeof(u32))); + } else + return; + + if (num_wraps == priv->event_log.num_wraps) { + iwl4965_print_cont_event_trace(priv, + base, priv->event_log.next_entry, + next_entry - priv->event_log.next_entry, + mode); + priv->event_log.non_wraps_count++; + } else { + if ((num_wraps - priv->event_log.num_wraps) > 1) + priv->event_log.wraps_more_count++; + else + priv->event_log.wraps_once_count++; + trace_iwlwifi_legacy_dev_ucode_wrap_event(priv, + num_wraps - priv->event_log.num_wraps, + next_entry, priv->event_log.next_entry); + if (next_entry < priv->event_log.next_entry) { + iwl4965_print_cont_event_trace(priv, base, + priv->event_log.next_entry, + capacity - priv->event_log.next_entry, + mode); + + iwl4965_print_cont_event_trace(priv, base, 0, + next_entry, mode); + } else { + iwl4965_print_cont_event_trace(priv, base, + next_entry, capacity - next_entry, + mode); + + iwl4965_print_cont_event_trace(priv, base, 0, + next_entry, mode); + } + } + priv->event_log.num_wraps = num_wraps; + priv->event_log.next_entry = next_entry; +} + +/** + * iwl4965_bg_ucode_trace - Timer callback to log ucode event + * + * The timer is continually set to execute every + * UCODE_TRACE_PERIOD milliseconds after the last timer expired + * this function is to perform continuous uCode event logging operation + * if enabled + */ +static void iwl4965_bg_ucode_trace(unsigned long data) +{ + struct iwl_priv *priv = (struct iwl_priv *)data; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (priv->event_log.ucode_trace) { + iwl4965_continuous_event_trace(priv); + /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */ + mod_timer(&priv->ucode_trace, + jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD)); + } +} + +static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl4965_beacon_notif *beacon = + (struct iwl4965_beacon_notif *)pkt->u.raw; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " + "tsf %d %d rate %d\n", + 
le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); +} + +static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv) +{ + unsigned long flags; + + IWL_DEBUG_POWER(priv, "Stop all queues\n"); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + iwl_read32(priv, CSR_UCODE_DRV_GP1); + + spin_lock_irqsave(&priv->reg_lock, flags); + if (!iwl_grab_nic_access(priv)) + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, flags); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_CARD_DISABLED) ? + "Reached" : "Not reached"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + CT_CARD_DISABLED)) { + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + } + } + + if (flags & CT_CARD_DISABLED) + iwl4965_perform_ct_kill_task(priv); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + if (!(flags & RXON_CARD_DISABLED)) + iwl_legacy_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +/** + * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + * + * This function chains into the hardware specific files for them to setup + * any hardware specific handlers as well. + */ +static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) +{ + priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; + priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; + priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; + priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = + iwl_legacy_rx_spectrum_measure_notif; + priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; + priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = + iwl_legacy_rx_pm_debug_statistics_notif; + priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. 
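The rx_handlers array being filled in here is a plain dispatch table: the uCode command byte indexes an array of function pointers, unfilled slots stay NULL, and the receive path falls back to a "no handler needed" branch. A minimal sketch of the pattern; the table size, command value and handler name below are invented for illustration:

#include <stdio.h>

#define REPLY_MAX 0x100                         /* assumed table size */

struct pkt { unsigned char cmd; };

typedef void (*rx_handler_t)(const struct pkt *pkt);

static rx_handler_t rx_handlers[REPLY_MAX];     /* zero-initialized: all NULL */

static void handle_alive(const struct pkt *pkt)
{
        printf("alive notification (cmd 0x%02x)\n", pkt->cmd);
}

static void dispatch(const struct pkt *pkt)
{
        if (rx_handlers[pkt->cmd])
                rx_handlers[pkt->cmd](pkt);     /* registered handler */
        else
                printf("no handler needed for 0x%02x\n", pkt->cmd);
}

int main(void)
{
        struct pkt alive = { .cmd = 0x01 };     /* hypothetical command value */
        struct pkt other = { .cmd = 0x42 };

        rx_handlers[alive.cmd] = handle_alive;  /* same idea as the setup above */
        dispatch(&alive);
        dispatch(&other);
        return 0;
}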
+ */ + priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics; + priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics; + + iwl_legacy_setup_rx_scan_handlers(priv); + + /* status change handler */ + priv->rx_handlers[CARD_STATE_NOTIFICATION] = + iwl4965_rx_card_state_notif; + + priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = + iwl4965_rx_missed_beacon_notif; + /* Rx handlers */ + priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; + priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx; + /* block ack */ + priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; + /* Set up hardware specific Rx handlers */ + priv->cfg->ops->lib->rx_handler_setup(priv); +} + +/** + * iwl4965_rx_handle - Main entry function for receiving responses from uCode + * + * Uses the priv->rx_handlers callback function array to invoke + * the appropriate handlers, including command responses, + * frame-received notifications, and other notifications. + */ +void iwl4965_rx_handle(struct iwl_priv *priv) +{ + struct iwl_rx_mem_buffer *rxb; + struct iwl_rx_packet *pkt; + struct iwl_rx_queue *rxq = &priv->rxq; + u32 r, i; + int reclaim; + unsigned long flags; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; + + /* uCode's read index (stored in shared DRAM) indicates the last Rx + * buffer that the driver may process (last buffer filled by ucode). */ + r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; + i = rxq->read; + + /* Rx interrupt, but nothing sent from uCode */ + if (i == r) + IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); + + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + + while (i != r) { + int len; + + rxb = rxq->queue[i]; + + /* If an RXB doesn't have a Rx queue slot associated with it, + * then a bug has been introduced in the queue refilling + * routines -- catch it here */ + BUG_ON(rxb == NULL); + + rxq->queue[i] = NULL; + + pci_unmap_page(priv->pci_dev, rxb->page_dma, + PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + pkt = rxb_addr(rxb); + + len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + len += sizeof(u32); /* account for status word */ + trace_iwlwifi_legacy_dev_rx(priv, pkt, len); + + /* Reclaim a command buffer only if this packet is a response + * to a (driver-originated) command. + * If the packet (e.g. Rx frame) originated from uCode, + * there is no command buffer to reclaim. + * Ucode should set SEQ_RX_FRAME bit if ucode-originated, + * but apparently a few don't get set; catch them here. */ + reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && + (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && + (pkt->hdr.cmd != REPLY_RX) && + (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) && + (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && + (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && + (pkt->hdr.cmd != REPLY_TX); + + /* Based on type of command response or notification, + * handle those that need handling via function in + * rx_handlers table. 
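The index bookkeeping at the top of iwl4965_rx_handle() above is modular arithmetic on a power-of-two ring: the distance from one index forward to another is taken modulo the queue size (this is how total_empty is derived from the hardware's closed_rb_num), and the read cursor advances with a mask. A small sketch, using a 256-entry ring purely for illustration:

#include <stdio.h>

#define QUEUE_SIZE 256u                 /* illustrative power-of-two ring size */
#define QUEUE_MASK (QUEUE_SIZE - 1)

/* Entries from index 'from' forward to index 'to', accounting for wrap. */
static unsigned int ring_distance(unsigned int from, unsigned int to)
{
        int diff = (int)to - (int)from;

        if (diff < 0)
                diff += (int)QUEUE_SIZE;
        return (unsigned int)diff;
}

int main(void)
{
        unsigned int read = 250, hw_closed = 10;   /* hw index has wrapped */

        printf("buffers to process: %u\n", ring_distance(read, hw_closed));
        printf("next read index:    %u\n", (read + 1) & QUEUE_MASK);
        return 0;
}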
See iwl4965_setup_rx_handlers() */ + if (priv->rx_handlers[pkt->hdr.cmd]) { + IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, + i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; + priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); + } else { + /* No handling needed */ + IWL_DEBUG_RX(priv, + "r %d i %d No handler needed for %s, 0x%02x\n", + r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), + pkt->hdr.cmd); + } + + /* + * XXX: After here, we should always check rxb->page + * against NULL before touching it or its virtual + * memory (pkt). Because some rx_handler might have + * already taken or freed the pages. + */ + + if (reclaim) { + /* Invoke any callbacks, transfer the buffer to caller, + * and fire off the (possibly) blocking iwl_legacy_send_cmd() + * as we reclaim the driver command queue */ + if (rxb->page) + iwl_legacy_tx_cmd_complete(priv, rxb); + else + IWL_WARN(priv, "Claim null rxb?\n"); + } + + /* Reuse the page if possible. For notification packets and + * SKBs that fail to Rx correctly, add them back into the + * rx_free list for reuse later. */ + spin_lock_irqsave(&rxq->lock, flags); + if (rxb->page != NULL) { + rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, + 0, PAGE_SIZE << priv->hw_params.rx_page_order, + PCI_DMA_FROMDEVICE); + list_add_tail(&rxb->list, &rxq->rx_free); + rxq->free_count++; + } else + list_add_tail(&rxb->list, &rxq->rx_used); + + spin_unlock_irqrestore(&rxq->lock, flags); + + i = (i + 1) & RX_QUEUE_MASK; + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. */ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + iwl4965_rx_replenish_now(priv); + count = 0; + } + } + } + + /* Backtrack one entry */ + rxq->read = i; + if (fill_rx) + iwl4965_rx_replenish_now(priv); + else + iwl4965_rx_queue_restock(priv); +} + +/* call this function to flush any scheduled tasklet */ +static inline void iwl4965_synchronize_irq(struct iwl_priv *priv) +{ + /* wait to make sure we flush pending tasklet*/ + synchronize_irq(priv->pci_dev->irq); + tasklet_kill(&priv->irq_tasklet); +} + +static void iwl4965_irq_tasklet(struct iwl_priv *priv) +{ + u32 inta, handled = 0; + u32 inta_fh; + unsigned long flags; + u32 i; +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + u32 inta_mask; +#endif + + spin_lock_irqsave(&priv->lock, flags); + + /* Ack/clear/reset pending uCode interrupts. + * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, + * and will clear only when CSR_FH_INT_STATUS gets cleared. */ + inta = iwl_read32(priv, CSR_INT); + iwl_write32(priv, CSR_INT, inta); + + /* Ack/clear/reset pending flow-handler (DMA) interrupts. + * Any new interrupts that happen after this, either while we're + * in this tasklet, or later, will show up in next ISR/tasklet. */ + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { + /* just for debug */ + inta_mask = iwl_read32(priv, CSR_INT_MASK); + IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", + inta, inta_mask, inta_fh); + } +#endif + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not + * atomic, make sure that inta covers all the interrupts that + * we've discovered, even if FH interrupt came in just after + * reading CSR_INT. 
*/ + if (inta_fh & CSR49_FH_INT_RX_MASK) + inta |= CSR_INT_BIT_FH_RX; + if (inta_fh & CSR49_FH_INT_TX_MASK) + inta |= CSR_INT_BIT_FH_TX; + + /* Now service all interrupt bits discovered above. */ + if (inta & CSR_INT_BIT_HW_ERR) { + IWL_ERR(priv, "Hardware error detected. Restarting.\n"); + + /* Tell the device to stop sending interrupts */ + iwl_legacy_disable_interrupts(priv); + + priv->isr_stats.hw++; + iwl_legacy_irq_handle_error(priv); + + handled |= CSR_INT_BIT_HW_ERR; + + return; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + /* NIC fires this, but we don't use it, redundant with WAKEUP */ + if (inta & CSR_INT_BIT_SCD) { + IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " + "the frame/frames.\n"); + priv->isr_stats.sch++; + } + + /* Alive notification via Rx interrupt will do the real work */ + if (inta & CSR_INT_BIT_ALIVE) { + IWL_DEBUG_ISR(priv, "Alive interrupt\n"); + priv->isr_stats.alive++; + } + } +#endif + /* Safely ignore these bits for debug checks below */ + inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); + + /* HW RF KILL switch toggled */ + if (inta & CSR_INT_BIT_RF_KILL) { + int hw_rf_kill = 0; + if (!(iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) + hw_rf_kill = 1; + + IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", + hw_rf_kill ? "disable radio" : "enable radio"); + + priv->isr_stats.rfkill++; + + /* driver only loads ucode once setting the interface up. + * the driver allows loading the ucode even if the radio + * is killed. Hence update the killswitch state here. The + * rfkill handler will care about restarting if needed. + */ + if (!test_bit(STATUS_ALIVE, &priv->status)) { + if (hw_rf_kill) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); + } + + handled |= CSR_INT_BIT_RF_KILL; + } + + /* Chip got too hot and stopped itself */ + if (inta & CSR_INT_BIT_CT_KILL) { + IWL_ERR(priv, "Microcode CT kill error detected.\n"); + priv->isr_stats.ctkill++; + handled |= CSR_INT_BIT_CT_KILL; + } + + /* Error detected by uCode */ + if (inta & CSR_INT_BIT_SW_ERR) { + IWL_ERR(priv, "Microcode SW error detected. " + " Restarting 0x%X.\n", inta); + priv->isr_stats.sw++; + iwl_legacy_irq_handle_error(priv); + handled |= CSR_INT_BIT_SW_ERR; + } + + /* + * uCode wakes up after power-down sleep. + * Tell device about any new tx or host commands enqueued, + * and about any Rx buffers made available while asleep. 
+ */ + if (inta & CSR_INT_BIT_WAKEUP) { + IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); + iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); + for (i = 0; i < priv->hw_params.max_txq_num; i++) + iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]); + priv->isr_stats.wakeup++; + handled |= CSR_INT_BIT_WAKEUP; + } + + /* All uCode command responses, including Tx command responses, + * Rx "responses" (frame-received notification), and other + * notifications from uCode come through here*/ + if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { + iwl4965_rx_handle(priv); + priv->isr_stats.rx++; + handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); + } + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta & CSR_INT_BIT_FH_TX) { + IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); + priv->isr_stats.tx++; + handled |= CSR_INT_BIT_FH_TX; + /* Wake up uCode load routine, now that load is complete */ + priv->ucode_write_complete = 1; + wake_up_interruptible(&priv->wait_command_queue); + } + + if (inta & ~handled) { + IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); + priv->isr_stats.unhandled++; + } + + if (inta & ~(priv->inta_mask)) { + IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", + inta & ~priv->inta_mask); + IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); + } + + /* Re-enable all interrupts */ + /* only Re-enable if diabled by irq */ + if (test_bit(STATUS_INT_ENABLED, &priv->status)) + iwl_legacy_enable_interrupts(priv); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { + inta = iwl_read32(priv, CSR_INT); + inta_mask = iwl_read32(priv, CSR_INT_MASK); + inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); + IWL_DEBUG_ISR(priv, + "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " + "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); + } +#endif +} + +/***************************************************************************** + * + * sysfs attributes + * + *****************************************************************************/ + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + +/* + * The following adds a new attribute to the sysfs representation + * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/) + * used for controlling the debug level. + * + * See the level definitions in iwl for details. + * + * The debug_level being managed using sysfs below is a per device debug + * level that is used instead of the global debug level if it (the per + * device debug level) is set. 
+ */ +static ssize_t iwl4965_show_debug_level(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); +} +static ssize_t iwl4965_store_debug_level(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 0, &val); + if (ret) + IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf); + else { + priv->debug_level = val; + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, + "Not enough memory to generate traffic log\n"); + } + return strnlen(buf, count); +} + +static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, + iwl4965_show_debug_level, iwl4965_store_debug_level); + + +#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ + + +static ssize_t iwl4965_show_temperature(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_alive(priv)) + return -EAGAIN; + + return sprintf(buf, "%d\n", priv->temperature); +} + +static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL); + +static ssize_t iwl4965_show_tx_power(struct device *d, + struct device_attribute *attr, char *buf) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + + if (!iwl_legacy_is_ready_rf(priv)) + return sprintf(buf, "off\n"); + else + return sprintf(buf, "%d\n", priv->tx_power_user_lmt); +} + +static ssize_t iwl4965_store_tx_power(struct device *d, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct iwl_priv *priv = dev_get_drvdata(d); + unsigned long val; + int ret; + + ret = strict_strtoul(buf, 10, &val); + if (ret) + IWL_INFO(priv, "%s is not in decimal form.\n", buf); + else { + ret = iwl_legacy_set_tx_power(priv, val, false); + if (ret) + IWL_ERR(priv, "failed setting tx power (0x%d).\n", + ret); + else + ret = count; + } + return ret; +} + +static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, + iwl4965_show_tx_power, iwl4965_store_tx_power); + +static struct attribute *iwl_sysfs_entries[] = { + &dev_attr_temperature.attr, + &dev_attr_tx_power.attr, +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + &dev_attr_debug_level.attr, +#endif + NULL +}; + +static struct attribute_group iwl_attribute_group = { + .name = NULL, /* put in device directory */ + .attrs = iwl_sysfs_entries, +}; + +/****************************************************************************** + * + * uCode download functions + * + ******************************************************************************/ + +static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv) +{ + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); + iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); +} + +static void iwl4965_nic_start(struct iwl_priv *priv) +{ + /* Remove all resets to allow NIC to operate */ + iwl_write32(priv, CSR_RESET, 0); +} + +static void iwl4965_ucode_callback(const struct firmware *ucode_raw, + void *context); +static int iwl4965_mac_setup_register(struct iwl_priv *priv, + u32 max_probe_length); + +static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first) +{ + const char *name_pre = priv->cfg->fw_name_pre; + char 
tag[8]; + + if (first) { + priv->fw_index = priv->cfg->ucode_api_max; + sprintf(tag, "%d", priv->fw_index); + } else { + priv->fw_index--; + sprintf(tag, "%d", priv->fw_index); + } + + if (priv->fw_index < priv->cfg->ucode_api_min) { + IWL_ERR(priv, "no suitable firmware found!\n"); + return -ENOENT; + } + + sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode"); + + IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n", + priv->firmware_name); + + return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name, + &priv->pci_dev->dev, GFP_KERNEL, priv, + iwl4965_ucode_callback); +} + +struct iwl4965_firmware_pieces { + const void *inst, *data, *init, *init_data, *boot; + size_t inst_size, data_size, init_size, init_data_size, boot_size; +}; + +static int iwl4965_load_firmware(struct iwl_priv *priv, + const struct firmware *ucode_raw, + struct iwl4965_firmware_pieces *pieces) +{ + struct iwl_ucode_header *ucode = (void *)ucode_raw->data; + u32 api_ver, hdr_size; + const u8 *src; + + priv->ucode_ver = le32_to_cpu(ucode->ver); + api_ver = IWL_UCODE_API(priv->ucode_ver); + + switch (api_ver) { + default: + case 0: + case 1: + case 2: + hdr_size = 24; + if (ucode_raw->size < hdr_size) { + IWL_ERR(priv, "File size too small!\n"); + return -EINVAL; + } + pieces->inst_size = le32_to_cpu(ucode->v1.inst_size); + pieces->data_size = le32_to_cpu(ucode->v1.data_size); + pieces->init_size = le32_to_cpu(ucode->v1.init_size); + pieces->init_data_size = + le32_to_cpu(ucode->v1.init_data_size); + pieces->boot_size = le32_to_cpu(ucode->v1.boot_size); + src = ucode->v1.data; + break; + } + + /* Verify size of file vs. image size info in file's header */ + if (ucode_raw->size != hdr_size + pieces->inst_size + + pieces->data_size + pieces->init_size + + pieces->init_data_size + pieces->boot_size) { + + IWL_ERR(priv, + "uCode file size %d does not match expected size\n", + (int)ucode_raw->size); + return -EINVAL; + } + + pieces->inst = src; + src += pieces->inst_size; + pieces->data = src; + src += pieces->data_size; + pieces->init = src; + src += pieces->init_size; + pieces->init_data = src; + src += pieces->init_data_size; + pieces->boot = src; + src += pieces->boot_size; + + return 0; +} + +/** + * iwl4965_ucode_callback - callback when firmware was loaded + * + * If loaded successfully, copies the firmware into buffers + * for the card to fetch (via DMA). 
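The v1-style layout handled by iwl4965_load_firmware() above is simply a small header of section sizes followed by the five images back to back, so the file length must equal the header size plus the sum of the advertised sizes. A compact user-space sketch of that slicing and size check; the 24-byte header and field order follow the api_ver <= 2 branch above, while the sample blob is fabricated:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define HDR_SIZE 24u    /* ver + five little-endian section sizes */

struct pieces {
        const uint8_t *inst, *data, *init, *init_data, *boot;
        uint32_t inst_size, data_size, init_size, init_data_size, boot_size;
};

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Slice a v1-style .ucode blob into its five images; returns 0 on success. */
static int slice_ucode(const uint8_t *buf, size_t len, struct pieces *p)
{
        const uint8_t *src;

        if (len < HDR_SIZE)
                return -1;

        p->inst_size      = get_le32(buf + 4);
        p->data_size      = get_le32(buf + 8);
        p->init_size      = get_le32(buf + 12);
        p->init_data_size = get_le32(buf + 16);
        p->boot_size      = get_le32(buf + 20);

        /* The file must be exactly header + all five sections. */
        if (len != HDR_SIZE + (size_t)p->inst_size + p->data_size +
                   p->init_size + p->init_data_size + p->boot_size)
                return -1;

        src = buf + HDR_SIZE;
        p->inst      = src; src += p->inst_size;
        p->data      = src; src += p->data_size;
        p->init      = src; src += p->init_size;
        p->init_data = src; src += p->init_data_size;
        p->boot      = src;
        return 0;
}

int main(void)
{
        /* Fabricated blob: ver = 1, sizes 2/1/0/0/1, then 4 payload bytes. */
        uint8_t blob[HDR_SIZE + 4] = {
                1, 0, 0, 0,  2, 0, 0, 0,  1, 0, 0, 0,
                0, 0, 0, 0,  0, 0, 0, 0,  1, 0, 0, 0,
                0xAA, 0xBB, 0xCC, 0xDD,
        };
        struct pieces p;

        if (slice_ucode(blob, sizeof(blob), &p) == 0)
                printf("inst %u data %u boot %u bytes\n",
                       (unsigned)p.inst_size, (unsigned)p.data_size,
                       (unsigned)p.boot_size);
        return 0;
}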
+ */ +static void +iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context) +{ + struct iwl_priv *priv = context; + struct iwl_ucode_header *ucode; + int err; + struct iwl4965_firmware_pieces pieces; + const unsigned int api_max = priv->cfg->ucode_api_max; + const unsigned int api_min = priv->cfg->ucode_api_min; + u32 api_ver; + + u32 max_probe_length = 200; + u32 standard_phy_calibration_size = + IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; + + memset(&pieces, 0, sizeof(pieces)); + + if (!ucode_raw) { + if (priv->fw_index <= priv->cfg->ucode_api_max) + IWL_ERR(priv, + "request for firmware file '%s' failed.\n", + priv->firmware_name); + goto try_again; + } + + IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", + priv->firmware_name, ucode_raw->size); + + /* Make sure that we got at least the API version number */ + if (ucode_raw->size < 4) { + IWL_ERR(priv, "File size way too small!\n"); + goto try_again; + } + + /* Data from ucode file: header followed by uCode images */ + ucode = (struct iwl_ucode_header *)ucode_raw->data; + + err = iwl4965_load_firmware(priv, ucode_raw, &pieces); + + if (err) + goto try_again; + + api_ver = IWL_UCODE_API(priv->ucode_ver); + + /* + * api_ver should match the api version forming part of the + * firmware filename ... but we don't check for that and only rely + * on the API version read from firmware header from here on forward + */ + if (api_ver < api_min || api_ver > api_max) { + IWL_ERR(priv, + "Driver unable to support your firmware API. " + "Driver supports v%u, firmware is v%u.\n", + api_max, api_ver); + goto try_again; + } + + if (api_ver != api_max) + IWL_ERR(priv, + "Firmware has old API version. Expected v%u, " + "got v%u. New firmware can be obtained " + "from http://www.intellinuxwireless.org.\n", + api_max, api_ver); + + IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + snprintf(priv->hw->wiphy->fw_version, + sizeof(priv->hw->wiphy->fw_version), + "%u.%u.%u.%u", + IWL_UCODE_MAJOR(priv->ucode_ver), + IWL_UCODE_MINOR(priv->ucode_ver), + IWL_UCODE_API(priv->ucode_ver), + IWL_UCODE_SERIAL(priv->ucode_ver)); + + /* + * For any of the failures below (before allocating pci memory) + * we will try to load a version with a smaller API -- maybe the + * user just got a corrupted version of the latest API. 
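The fallback itself lives in iwl4965_request_firmware() further up: fw_index starts at the newest API revision the driver supports and is decremented on every failed attempt until it falls below the minimum, and each candidate filename is built as the configured prefix, the index, and ".ucode". A small sketch of that walk; the prefix string and the API bounds here are placeholders rather than the driver's real values:

#include <stdio.h>

#define API_MAX 3       /* placeholder for priv->cfg->ucode_api_max */
#define API_MIN 1       /* placeholder for priv->cfg->ucode_api_min */

/* Build the next candidate firmware name, newest API first; 0 when exhausted. */
static int next_fw_name(const char *name_pre, int *fw_index, int first,
                        char *buf, size_t buflen)
{
        if (first)
                *fw_index = API_MAX;
        else
                (*fw_index)--;

        if (*fw_index < API_MIN)
                return 0;               /* "no suitable firmware found" */

        snprintf(buf, buflen, "%s%d%s", name_pre, *fw_index, ".ucode");
        return 1;
}

int main(void)
{
        char name[64];
        int idx, first = 1;

        while (next_fw_name("iwlwifi-hypothetical-", &idx, first,
                            name, sizeof(name))) {
                printf("would request %s\n", name);
                first = 0;              /* later attempts step the index down */
        }
        return 0;
}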
+ */ + + IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", + priv->ucode_ver); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n", + pieces.inst_size); + IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n", + pieces.data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n", + pieces.init_size); + IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n", + pieces.init_data_size); + IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n", + pieces.boot_size); + + /* Verify that uCode images will fit in card's SRAM */ + if (pieces.inst_size > priv->hw_params.max_inst_size) { + IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n", + pieces.inst_size); + goto try_again; + } + + if (pieces.data_size > priv->hw_params.max_data_size) { + IWL_ERR(priv, "uCode data len %Zd too large to fit in\n", + pieces.data_size); + goto try_again; + } + + if (pieces.init_size > priv->hw_params.max_inst_size) { + IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n", + pieces.init_size); + goto try_again; + } + + if (pieces.init_data_size > priv->hw_params.max_data_size) { + IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n", + pieces.init_data_size); + goto try_again; + } + + if (pieces.boot_size > priv->hw_params.max_bsm_size) { + IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n", + pieces.boot_size); + goto try_again; + } + + /* Allocate ucode buffers for card's bus-master loading ... */ + + /* Runtime instructions and 2 copies of data: + * 1) unmodified from disk + * 2) backup cache for save/restore during power-downs */ + priv->ucode_code.len = pieces.inst_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); + + priv->ucode_data.len = pieces.data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); + + priv->ucode_data_backup.len = pieces.data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); + + if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || + !priv->ucode_data_backup.v_addr) + goto err_pci_alloc; + + /* Initialization instructions and data */ + if (pieces.init_size && pieces.init_data_size) { + priv->ucode_init.len = pieces.init_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); + + priv->ucode_init_data.len = pieces.init_data_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); + + if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) + goto err_pci_alloc; + } + + /* Bootstrap (instructions only, no data) */ + if (pieces.boot_size) { + priv->ucode_boot.len = pieces.boot_size; + iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); + + if (!priv->ucode_boot.v_addr) + goto err_pci_alloc; + } + + /* Now that we can no longer fail, copy information */ + + priv->sta_key_max_num = STA_KEY_MAX_NUM; + + /* Copy images into buffers for card's bus-master reads ... 
*/ + + /* Runtime instructions (first block of data in file) */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", + pieces.inst_size); + memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size); + + IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", + priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); + + /* + * Runtime data + * NOTE: Copy into backup buffer will be done in iwl_up() + */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", + pieces.data_size); + memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size); + memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size); + + /* Initialization instructions */ + if (pieces.init_size) { + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init instr len %Zd\n", + pieces.init_size); + memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size); + } + + /* Initialization data */ + if (pieces.init_data_size) { + IWL_DEBUG_INFO(priv, + "Copying (but not loading) init data len %Zd\n", + pieces.init_data_size); + memcpy(priv->ucode_init_data.v_addr, pieces.init_data, + pieces.init_data_size); + } + + /* Bootstrap instructions */ + IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", + pieces.boot_size); + memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size); + + /* + * figure out the offset of chain noise reset and gain commands + * base on the size of standard phy calibration commands table size + */ + priv->_4965.phy_calib_chain_noise_reset_cmd = + standard_phy_calibration_size; + priv->_4965.phy_calib_chain_noise_gain_cmd = + standard_phy_calibration_size + 1; + + /************************************************** + * This is still part of probe() in a sense... + * + * 9. Setup and register with mac80211 and debugfs + **************************************************/ + err = iwl4965_mac_setup_register(priv, max_probe_length); + if (err) + goto out_unbind; + + err = iwl_legacy_dbgfs_register(priv, DRV_NAME); + if (err) + IWL_ERR(priv, + "failed to create debugfs files. 
Ignoring error: %d\n", err);
+
+	err = sysfs_create_group(&priv->pci_dev->dev.kobj,
+					&iwl_attribute_group);
+	if (err) {
+		IWL_ERR(priv, "failed to create sysfs device attributes\n");
+		goto out_unbind;
+	}
+
+	/* We have our copies now, allow OS release its copies */
+	release_firmware(ucode_raw);
+	complete(&priv->_4965.firmware_loading_complete);
+	return;
+
+ try_again:
+	/* try next, if any */
+	if (iwl4965_request_firmware(priv, false))
+		goto out_unbind;
+	release_firmware(ucode_raw);
+	return;
+
+ err_pci_alloc:
+	IWL_ERR(priv, "failed to allocate pci memory\n");
+	iwl4965_dealloc_ucode_pci(priv);
+ out_unbind:
+	complete(&priv->_4965.firmware_loading_complete);
+	device_release_driver(&priv->pci_dev->dev);
+	release_firmware(ucode_raw);
+}
+
+static const char * const desc_lookup_text[] = {
+	"OK",
+	"FAIL",
+	"BAD_PARAM",
+	"BAD_CHECKSUM",
+	"NMI_INTERRUPT_WDG",
+	"SYSASSERT",
+	"FATAL_ERROR",
+	"BAD_COMMAND",
+	"HW_ERROR_TUNE_LOCK",
+	"HW_ERROR_TEMPERATURE",
+	"ILLEGAL_CHAN_FREQ",
+	"VCC_NOT_STABLE",
+	"FH_ERROR",
+	"NMI_INTERRUPT_HOST",
+	"NMI_INTERRUPT_ACTION_PT",
+	"NMI_INTERRUPT_UNKNOWN",
+	"UCODE_VERSION_MISMATCH",
+	"HW_ERROR_ABS_LOCK",
+	"HW_ERROR_CAL_LOCK_FAIL",
+	"NMI_INTERRUPT_INST_ACTION_PT",
+	"NMI_INTERRUPT_DATA_ACTION_PT",
+	"NMI_TRM_HW_ER",
+	"NMI_INTERRUPT_TRM",
+	"NMI_INTERRUPT_BREAK_POINT",
+	"DEBUG_0",
+	"DEBUG_1",
+	"DEBUG_2",
+	"DEBUG_3",
+};
+
+static struct { char *name; u8 num; } advanced_lookup[] = {
+	{ "NMI_INTERRUPT_WDG", 0x34 },
+	{ "SYSASSERT", 0x35 },
+	{ "UCODE_VERSION_MISMATCH", 0x37 },
+	{ "BAD_COMMAND", 0x38 },
+	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+	{ "FATAL_ERROR", 0x3D },
+	{ "NMI_TRM_HW_ERR", 0x46 },
+	{ "NMI_INTERRUPT_TRM", 0x4C },
+	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+	{ "NMI_INTERRUPT_HOST", 0x66 },
+	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
+	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
+	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+	{ "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *iwl4965_desc_lookup(u32 num)
+{
+	int i;
+	int max = ARRAY_SIZE(desc_lookup_text);
+
+	if (num < max)
+		return desc_lookup_text[num];
+
+	max = ARRAY_SIZE(advanced_lookup) - 1;
+	for (i = 0; i < max; i++) {
+		if (advanced_lookup[i].num == num)
+			break;
+	}
+	return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
+{
+	u32 data2, line;
+	u32 desc, time, count, base, data1;
+	u32 blink1, blink2, ilink1, ilink2;
+	u32 pc, hcmd;
+
+	if (priv->ucode_type == UCODE_INIT) {
+		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+	} else {
+		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
+	}
+
+	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+		IWL_ERR(priv,
+			"Not valid error log pointer 0x%08X for %s uCode\n",
+			base, (priv->ucode_type == UCODE_INIT) ?
"Init" : "RT"); + return; + } + + count = iwl_legacy_read_targ_mem(priv, base); + + if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + IWL_ERR(priv, "Start IWL Error Log Dump:\n"); + IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", + priv->status, count); + } + + desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32)); + priv->isr_stats.err_code = desc; + pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32)); + blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32)); + blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32)); + ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32)); + ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32)); + data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32)); + data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32)); + line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32)); + time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32)); + hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32)); + + trace_iwlwifi_legacy_dev_ucode_error(priv, desc, + time, data1, data2, line, + blink1, blink2, ilink1, ilink2); + + IWL_ERR(priv, "Desc Time " + "data1 data2 line\n"); + IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", + iwl4965_desc_lookup(desc), desc, time, data1, data2, line); + IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); + IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", + pc, blink1, blink2, ilink1, ilink2, hcmd); +} + +#define EVENT_START_OFFSET (4 * sizeof(u32)) + +/** + * iwl4965_print_event_log - Dump error event log to syslog + * + */ +static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx, + u32 num_events, u32 mode, + int pos, char **buf, size_t bufsz) +{ + u32 i; + u32 base; /* SRAM byte address of event log header */ + u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ + u32 ptr; /* SRAM byte address of log data */ + u32 ev, time, data; /* event log data */ + unsigned long reg_flags; + + if (num_events == 0) + return pos; + + if (priv->ucode_type == UCODE_INIT) { + base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); + } else { + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + } + + if (mode == 0) + event_size = 2 * sizeof(u32); + else + event_size = 3 * sizeof(u32); + + ptr = base + EVENT_START_OFFSET + (start_idx * event_size); + + /* Make sure device is powered up for SRAM reads */ + spin_lock_irqsave(&priv->reg_lock, reg_flags); + iwl_grab_nic_access(priv); + + /* Set starting address; reads will auto-increment */ + _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); + rmb(); + + /* "time" is actually "data" for mode 0 (no timestamp). + * place event id # at far right for easier visual parsing. 
*/ + for (i = 0; i < num_events; i++) { + ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); + if (mode == 0) { + /* data, ev */ + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOG:0x%08x:%04u\n", + time, ev); + } else { + trace_iwlwifi_legacy_dev_ucode_event(priv, 0, + time, ev); + IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", + time, ev); + } + } else { + data = _iwl_legacy_read_direct32(priv, + HBUS_TARG_MEM_RDAT); + if (bufsz) { + pos += scnprintf(*buf + pos, bufsz - pos, + "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + } else { + IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", + time, data, ev); + trace_iwlwifi_legacy_dev_ucode_event(priv, time, + data, ev); + } + } + } + + /* Allow device to power down */ + iwl_release_nic_access(priv); + spin_unlock_irqrestore(&priv->reg_lock, reg_flags); + return pos; +} + +/** + * iwl4965_print_last_event_logs - Dump the newest # of event log to syslog + */ +static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity, + u32 num_wraps, u32 next_entry, + u32 size, u32 mode, + int pos, char **buf, size_t bufsz) +{ + /* + * display the newest DEFAULT_LOG_ENTRIES entries + * i.e the entries just before the next ont that uCode would fill. + */ + if (num_wraps) { + if (next_entry < size) { + pos = iwl4965_print_event_log(priv, + capacity - (size - next_entry), + size - next_entry, mode, + pos, buf, bufsz); + pos = iwl4965_print_event_log(priv, 0, + next_entry, mode, + pos, buf, bufsz); + } else + pos = iwl4965_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } else { + if (next_entry < size) { + pos = iwl4965_print_event_log(priv, 0, next_entry, + mode, pos, buf, bufsz); + } else { + pos = iwl4965_print_event_log(priv, next_entry - size, + size, mode, pos, buf, bufsz); + } + } + return pos; +} + +#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) + +int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log, + char **buf, bool display) +{ + u32 base; /* SRAM byte address of event log header */ + u32 capacity; /* event log capacity in # entries */ + u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ + u32 num_wraps; /* # times uCode wrapped to top of log */ + u32 next_entry; /* index of next entry to be written by uCode */ + u32 size; /* # entries that we'll print */ + int pos = 0; + size_t bufsz = 0; + + if (priv->ucode_type == UCODE_INIT) { + base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr); + } else { + base = le32_to_cpu(priv->card_alive.log_event_table_ptr); + } + + if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) { + IWL_ERR(priv, + "Invalid event log pointer 0x%08X for %s uCode\n", + base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT"); + return -EINVAL; + } + + /* event log header */ + capacity = iwl_legacy_read_targ_mem(priv, base); + mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32))); + num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32))); + next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32))); + + size = num_wraps ? capacity : next_entry; + + /* bail out if nothing in log */ + if (size == 0) { + IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); + return pos; + } + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#else + size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES) + ? 
DEFAULT_DUMP_EVENT_LOG_ENTRIES : size; +#endif + IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n", + size); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG + if (display) { + if (full_log) + bufsz = capacity * 48; + else + bufsz = size * 48; + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + } + if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { + /* + * if uCode has wrapped back to top of log, + * start at the oldest entry, + * i.e the next one that uCode would fill. + */ + if (num_wraps) + pos = iwl4965_print_event_log(priv, next_entry, + capacity - next_entry, mode, + pos, buf, bufsz); + /* (then/else) start at top of log */ + pos = iwl4965_print_event_log(priv, 0, + next_entry, mode, pos, buf, bufsz); + } else + pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#else + pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps, + next_entry, size, mode, + pos, buf, bufsz); +#endif + return pos; +} + +static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv) +{ + struct iwl_ct_kill_config cmd; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&priv->lock, flags); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); + spin_unlock_irqrestore(&priv->lock, flags); + + cmd.critical_temperature_R = + cpu_to_le32(priv->hw_params.ct_kill_threshold); + + ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD, + sizeof(cmd), &cmd); + if (ret) + IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); + else + IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " + "succeeded, " + "critical temperature is %d\n", + priv->hw_params.ct_kill_threshold); +} + +static const s8 default_queue_to_tx_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, + IWL49_CMD_FIFO_NUM, + IWL_TX_FIFO_UNUSED, + IWL_TX_FIFO_UNUSED, +}; + +static int iwl4965_alive_notify(struct iwl_priv *priv) +{ + u32 a; + unsigned long flags; + int i, chan; + u32 reg_val; + + spin_lock_irqsave(&priv->lock, flags); + + /* Clear 4965's internal Tx Scheduler data base */ + priv->scd_base_addr = iwl_legacy_read_prph(priv, + IWL49_SCD_SRAM_BASE_ADDR); + a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET; + for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + for (; a < priv->scd_base_addr + + IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) + iwl_legacy_write_targ_mem(priv, a, 0); + + /* Tel 4965 where to find Tx byte count tables */ + iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR, + priv->scd_bc_tbls.dma >> 10); + + /* Enable DMA channel */ + for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++) + iwl_legacy_write_direct32(priv, + FH_TCSR_CHNL_TX_CONFIG_REG(chan), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); + + /* Update FH chicken bits */ + reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); + iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, + reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); + + /* Disable chain mode for all queues */ + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0); + + /* Initialize each Tx queue (including the command queue) */ + for (i = 0; i < priv->hw_params.max_txq_num; i++) { + + /* TFD circular buffer read/write indexes */ + 
iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0); + iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); + + /* Max Tx Window size for Scheduler-ACK mode */ + iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(i), + (SCD_WIN_SIZE << + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & + IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); + + /* Frame limit */ + iwl_legacy_write_targ_mem(priv, priv->scd_base_addr + + IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) + + sizeof(u32), + (SCD_FRAME_LIMIT << + IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & + IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); + + } + iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK, + (1 << priv->hw_params.max_txq_num) - 1); + + /* Activate all Tx DMA/FIFO channels */ + iwl4965_txq_set_sched(priv, IWL_MASK(0, 6)); + + iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0); + + /* make sure all queue are not stopped */ + memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); + for (i = 0; i < 4; i++) + atomic_set(&priv->queue_stop_count[i], 0); + + /* reset to 0 to enable all the queue first */ + priv->txq_ctx_active_msk = 0; + /* Map each Tx/cmd queue to its corresponding fifo */ + BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7); + + for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { + int ac = default_queue_to_tx_fifo[i]; + + iwl_txq_ctx_activate(priv, i); + + if (ac == IWL_TX_FIFO_UNUSED) + continue; + + iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + return 0; +} + +/** + * iwl4965_alive_start - called after REPLY_ALIVE notification received + * from protocol/runtime uCode (initialization uCode's + * Alive gets handled by iwl_init_alive_start()). + */ +static void iwl4965_alive_start(struct iwl_priv *priv) +{ + int ret = 0; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + + if (priv->card_alive.is_valid != UCODE_VALID_OK) { + /* We had an error bringing up the hardware, so take it + * all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Alive failed.\n"); + goto restart; + } + + /* Initialize uCode has loaded Runtime uCode ... verify inst image. + * This is a paranoid check, because we would not have gotten the + * "runtime" alive if code weren't properly loaded. 
*/ + if (iwl4965_verify_ucode(priv)) { + /* Runtime instruction load was bad; + * take it all the way back down so we can try again */ + IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); + goto restart; + } + + ret = iwl4965_alive_notify(priv); + if (ret) { + IWL_WARN(priv, + "Could not complete ALIVE transition [ntf]: %d\n", ret); + goto restart; + } + + + /* After the ALIVE response, we can send host commands to the uCode */ + set_bit(STATUS_ALIVE, &priv->status); + + /* Enable watchdog to monitor the driver tx queues */ + iwl_legacy_setup_watchdog(priv); + + if (iwl_legacy_is_rfkill(priv)) + return; + + ieee80211_wake_queues(priv->hw); + + priv->active_rate = IWL_RATES_MASK; + + if (iwl_legacy_is_associated_ctx(ctx)) { + struct iwl_legacy_rxon_cmd *active_rxon = + (struct iwl_legacy_rxon_cmd *)&ctx->active; + /* apply any changes in staging */ + ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; + active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; + } else { + struct iwl_rxon_context *tmp; + /* Initialize our rx_config data */ + for_each_context(priv, tmp) + iwl_legacy_connection_init_rx_config(priv, tmp); + + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); + } + + /* Configure bluetooth coexistence if enabled */ + iwl_legacy_send_bt_config(priv); + + iwl4965_reset_run_time_calib(priv); + + set_bit(STATUS_READY, &priv->status); + + /* Configure the adapter for unassociated operation */ + iwl_legacy_commit_rxon(priv, ctx); + + /* At this point, the NIC is initialized and operational */ + iwl4965_rf_kill_ct_config(priv); + + IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); + wake_up_interruptible(&priv->wait_command_queue); + + iwl_legacy_power_update_mode(priv, true); + IWL_DEBUG_INFO(priv, "Updated power mode\n"); + + return; + + restart: + queue_work(priv->workqueue, &priv->restart); +} + +static void iwl4965_cancel_deferred_work(struct iwl_priv *priv); + +static void __iwl4965_down(struct iwl_priv *priv) +{ + unsigned long flags; + int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); + + IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); + + iwl_legacy_scan_cancel_timeout(priv, 200); + + exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); + + /* Stop TX queues watchdog. 
We need to have STATUS_EXIT_PENDING bit set + * to prevent rearm timer */ + del_timer_sync(&priv->watchdog); + + iwl_legacy_clear_ucode_stations(priv, NULL); + iwl_legacy_dealloc_bcast_stations(priv); + iwl_legacy_clear_driver_stations(priv); + + /* Unblock any waiting calls */ + wake_up_interruptible_all(&priv->wait_command_queue); + + /* Wipe out the EXIT_PENDING status bit if we are not actually + * exiting the module */ + if (!exit_pending) + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* stop and reset the on-board processor */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + /* tell the device to stop sending interrupts */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + iwl4965_synchronize_irq(priv); + + if (priv->mac80211_registered) + ieee80211_stop_queues(priv->hw); + + /* If we have not previously called iwl_init() then + * clear all bits but the RF Kill bit and return */ + if (!iwl_legacy_is_init(priv)) { + priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + goto exit; + } + + /* ...otherwise clear out all the status bits but the RF Kill + * bit and continue taking the NIC down. */ + priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << + STATUS_RF_KILL_HW | + test_bit(STATUS_GEO_CONFIGURED, &priv->status) << + STATUS_GEO_CONFIGURED | + test_bit(STATUS_FW_ERROR, &priv->status) << + STATUS_FW_ERROR | + test_bit(STATUS_EXIT_PENDING, &priv->status) << + STATUS_EXIT_PENDING; + + iwl4965_txq_ctx_stop(priv); + iwl4965_rxq_stop(priv); + + /* Power-down device's busmaster DMA clocks */ + iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); + udelay(5); + + /* Make sure (redundant) we've released our request to stay awake */ + iwl_legacy_clear_bit(priv, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + + /* Stop the device, and put it in low power state */ + iwl_legacy_apm_stop(priv); + + exit: + memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); + + dev_kfree_skb(priv->beacon_skb); + priv->beacon_skb = NULL; + + /* clear out any free frames */ + iwl4965_clear_free_frames(priv); +} + +static void iwl4965_down(struct iwl_priv *priv) +{ + mutex_lock(&priv->mutex); + __iwl4965_down(priv); + mutex_unlock(&priv->mutex); + + iwl4965_cancel_deferred_work(priv); +} + +#define HW_READY_TIMEOUT (50) + +static int iwl4965_set_hw_ready(struct iwl_priv *priv) +{ + int ret = 0; + + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); + + /* See if we got it */ + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, + HW_READY_TIMEOUT); + if (ret != -ETIMEDOUT) + priv->hw_ready = true; + else + priv->hw_ready = false; + + IWL_DEBUG_INFO(priv, "hardware %s\n", + (priv->hw_ready == 1) ? 
"ready" : "not ready"); + return ret; +} + +static int iwl4965_prepare_card_hw(struct iwl_priv *priv) +{ + int ret = 0; + + IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n"); + + ret = iwl4965_set_hw_ready(priv); + if (priv->hw_ready) + return ret; + + /* If HW is not ready, prepare the conditions to check again */ + iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_PREPARE); + + ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, + ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, + CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); + + /* HW should be ready by now, check again. */ + if (ret != -ETIMEDOUT) + iwl4965_set_hw_ready(priv); + + return ret; +} + +#define MAX_HW_RESTARTS 5 + +static int __iwl4965_up(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx; + int i; + int ret; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { + IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); + return -EIO; + } + + if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { + IWL_ERR(priv, "ucode not available for device bringup\n"); + return -EIO; + } + + for_each_context(priv, ctx) { + ret = iwl4965_alloc_bcast_station(priv, ctx); + if (ret) { + iwl_legacy_dealloc_bcast_stations(priv); + return ret; + } + } + + iwl4965_prepare_card_hw(priv); + + if (!priv->hw_ready) { + IWL_WARN(priv, "Exit HW not ready\n"); + return -EIO; + } + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, + CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + if (iwl_legacy_is_rfkill(priv)) { + wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); + + iwl_legacy_enable_interrupts(priv); + IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); + return 0; + } + + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + + /* must be initialised before iwl_hw_nic_init */ + priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; + + ret = iwl4965_hw_nic_init(priv); + if (ret) { + IWL_ERR(priv, "Unable to init nic\n"); + return ret; + } + + /* make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + /* clear (again), then enable host interrupts */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_legacy_enable_interrupts(priv); + + /* really make sure rfkill handshake bits are cleared */ + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); + + /* Copy original ucode data image from disk into backup cache. + * This will be used to initialize the on-board processor's + * data SRAM for a clean start when the runtime program first loads. 
*/ + memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, + priv->ucode_data.len); + + for (i = 0; i < MAX_HW_RESTARTS; i++) { + + /* load bootstrap state machine, + * load bootstrap program into processor's memory, + * prepare to load the "initialize" uCode */ + ret = priv->cfg->ops->lib->load_ucode(priv); + + if (ret) { + IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n", + ret); + continue; + } + + /* start card; "initialize" will load runtime ucode */ + iwl4965_nic_start(priv); + + IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); + + return 0; + } + + set_bit(STATUS_EXIT_PENDING, &priv->status); + __iwl4965_down(priv); + clear_bit(STATUS_EXIT_PENDING, &priv->status); + + /* tried to restart and config the device for as long as our + * patience could withstand */ + IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); + return -EIO; +} + + +/***************************************************************************** + * + * Workqueue callbacks + * + *****************************************************************************/ + +static void iwl4965_bg_init_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, init_alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + priv->cfg->ops->lib->init_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_alive_start(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, alive_start.work); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl4965_alive_start(priv); + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_run_time_calib_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + run_time_calib_work); + + mutex_lock(&priv->mutex); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) { + mutex_unlock(&priv->mutex); + return; + } + + if (priv->start_calib) { + iwl4965_chain_noise_calibration(priv, + (void *)&priv->_4965.statistics); + iwl4965_sensitivity_calibration(priv, + (void *)&priv->_4965.statistics); + } + + mutex_unlock(&priv->mutex); +} + +static void iwl4965_bg_restart(struct work_struct *data) +{ + struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { + struct iwl_rxon_context *ctx; + + mutex_lock(&priv->mutex); + for_each_context(priv, ctx) + ctx->vif = NULL; + priv->is_open = 0; + + __iwl4965_down(priv); + + mutex_unlock(&priv->mutex); + iwl4965_cancel_deferred_work(priv); + ieee80211_restart_hw(priv->hw); + } else { + iwl4965_down(priv); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + __iwl4965_up(priv); + mutex_unlock(&priv->mutex); + } +} + +static void iwl4965_bg_rx_replenish(struct work_struct *data) +{ + struct iwl_priv *priv = + container_of(data, struct iwl_priv, rx_replenish); + + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + iwl4965_rx_replenish(priv); + mutex_unlock(&priv->mutex); +} + +/***************************************************************************** + * + * mac80211 entry point functions + * + *****************************************************************************/ + +#define UCODE_READY_TIMEOUT (4 * HZ) + +/* + * Not a 
mac80211 entry point function, but it fits in with all the + * other mac80211 functions grouped here. + */ +static int iwl4965_mac_setup_register(struct iwl_priv *priv, + u32 max_probe_length) +{ + int ret; + struct ieee80211_hw *hw = priv->hw; + struct iwl_rxon_context *ctx; + + hw->rate_control_algorithm = "iwl-4965-rs"; + + /* Tell mac80211 our characteristics */ + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_AMPDU_AGGREGATION | + IEEE80211_HW_NEED_DTIM_PERIOD | + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_REPORTS_TX_ACK_STATUS; + + if (priv->cfg->sku & IWL_SKU_N) + hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | + IEEE80211_HW_SUPPORTS_STATIC_SMPS; + + hw->sta_data_size = sizeof(struct iwl_station_priv); + hw->vif_data_size = sizeof(struct iwl_vif_priv); + + for_each_context(priv, ctx) { + hw->wiphy->interface_modes |= ctx->interface_modes; + hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; + } + + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | + WIPHY_FLAG_DISABLE_BEACON_HINTS; + + /* + * For now, disable PS by default because it affects + * RX performance significantly. + */ + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + + hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; + /* we create the 802.11 header and a zero-length SSID element */ + hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2; + + /* Default value; 4 EDCA QOS priorities */ + hw->queues = 4; + + hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; + + if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &priv->bands[IEEE80211_BAND_2GHZ]; + if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) + priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &priv->bands[IEEE80211_BAND_5GHZ]; + + iwl_legacy_leds_init(priv); + + ret = ieee80211_register_hw(priv->hw); + if (ret) { + IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); + return ret; + } + priv->mac80211_registered = 1; + + return 0; +} + + +int iwl4965_mac_start(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + /* we should be verifying the device is ready to be opened */ + mutex_lock(&priv->mutex); + ret = __iwl4965_up(priv); + mutex_unlock(&priv->mutex); + + if (ret) + return ret; + + if (iwl_legacy_is_rfkill(priv)) + goto out; + + IWL_DEBUG_INFO(priv, "Start UP work done.\n"); + + /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from + * mac80211 will not be run successfully. 
*/ + ret = wait_event_interruptible_timeout(priv->wait_command_queue, + test_bit(STATUS_READY, &priv->status), + UCODE_READY_TIMEOUT); + if (!ret) { + if (!test_bit(STATUS_READY, &priv->status)) { + IWL_ERR(priv, "START_ALIVE timeout after %dms.\n", + jiffies_to_msecs(UCODE_READY_TIMEOUT)); + return -ETIMEDOUT; + } + } + + iwl4965_led_enable(priv); + +out: + priv->is_open = 1; + IWL_DEBUG_MAC80211(priv, "leave\n"); + return 0; +} + +void iwl4965_mac_stop(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (!priv->is_open) + return; + + priv->is_open = 0; + + iwl4965_down(priv); + + flush_workqueue(priv->workqueue); + + /* enable interrupts again in order to receive rfkill changes */ + iwl_write32(priv, CSR_INT, 0xFFFFFFFF); + iwl_legacy_enable_interrupts(priv); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct iwl_priv *priv = hw->priv; + + IWL_DEBUG_MACDUMP(priv, "enter\n"); + + IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, + ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); + + if (iwl4965_tx_skb(priv, skb)) + dev_kfree_skb_any(skb); + + IWL_DEBUG_MACDUMP(priv, "leave\n"); +} + +void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_key_conf *keyconf, + struct ieee80211_sta *sta, + u32 iv32, u16 *phase1key) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta, + iv32, phase1key); + + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + struct iwl_rxon_context *ctx = vif_priv->ctx; + int ret; + u8 sta_id; + bool is_default_wep_key = false; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (priv->cfg->mod_params->sw_crypto) { + IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); + return -EOPNOTSUPP; + } + + sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta); + if (sta_id == IWL_INVALID_STATION) + return -EINVAL; + + mutex_lock(&priv->mutex); + iwl_legacy_scan_cancel_timeout(priv, 100); + + /* + * If we are getting WEP group key and we didn't receive any key mapping + * so far, we are in legacy wep mode (group key only), otherwise we are + * in 1X mode. + * In legacy wep mode, we use another host command to the uCode. 
+ */ + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || + key->cipher == WLAN_CIPHER_SUITE_WEP104) && + !sta) { + if (cmd == SET_KEY) + is_default_wep_key = !ctx->key_mapping_keys; + else + is_default_wep_key = + (key->hw_key_idx == HW_KEY_DEFAULT); + } + + switch (cmd) { + case SET_KEY: + if (is_default_wep_key) + ret = iwl4965_set_default_wep_key(priv, + vif_priv->ctx, key); + else + ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx, + key, sta_id); + + IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); + break; + case DISABLE_KEY: + if (is_default_wep_key) + ret = iwl4965_remove_default_wep_key(priv, ctx, key); + else + ret = iwl4965_remove_dynamic_key(priv, ctx, + key, sta_id); + + IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); + break; + default: + ret = -EINVAL; + } + + mutex_unlock(&priv->mutex); + IWL_DEBUG_MAC80211(priv, "leave\n"); + + return ret; +} + +int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + struct iwl_priv *priv = hw->priv; + int ret = -EINVAL; + + IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", + sta->addr, tid); + + if (!(priv->cfg->sku & IWL_SKU_N)) + return -EACCES; + + mutex_lock(&priv->mutex); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + IWL_DEBUG_HT(priv, "start Rx\n"); + ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn); + break; + case IEEE80211_AMPDU_RX_STOP: + IWL_DEBUG_HT(priv, "stop Rx\n"); + ret = iwl4965_sta_rx_agg_stop(priv, sta, tid); + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_START: + IWL_DEBUG_HT(priv, "start Tx\n"); + ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn); + if (ret == 0) { + priv->_4965.agg_tids_count++; + IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n", + priv->_4965.agg_tids_count); + } + break; + case IEEE80211_AMPDU_TX_STOP: + IWL_DEBUG_HT(priv, "stop Tx\n"); + ret = iwl4965_tx_agg_stop(priv, vif, sta, tid); + if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) { + priv->_4965.agg_tids_count--; + IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n", + priv->_4965.agg_tids_count); + } + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) + ret = 0; + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = 0; + break; + } + mutex_unlock(&priv->mutex); + + return ret; +} + +int iwl4965_mac_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_priv *priv = hw->priv; + struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; + struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; + bool is_ap = vif->type == NL80211_IFTYPE_STATION; + int ret; + u8 sta_id; + + IWL_DEBUG_INFO(priv, "received request to add station %pM\n", + sta->addr); + mutex_lock(&priv->mutex); + IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", + sta->addr); + sta_priv->common.sta_id = IWL_INVALID_STATION; + + atomic_set(&sta_priv->pending_frames, 0); + + ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr, + is_ap, sta, &sta_id); + if (ret) { + IWL_ERR(priv, "Unable to add station %pM (%d)\n", + sta->addr, ret); + /* Should we return success if return code is EEXIST ? 
*/ + mutex_unlock(&priv->mutex); + return ret; + } + + sta_priv->common.sta_id = sta_id; + + /* Initialize rate scaling */ + IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", + sta->addr); + iwl4965_rs_rate_init(priv, sta, sta_id); + mutex_unlock(&priv->mutex); + + return 0; +} + +void iwl4965_mac_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_channel_switch *ch_switch) +{ + struct iwl_priv *priv = hw->priv; + const struct iwl_channel_info *ch_info; + struct ieee80211_conf *conf = &hw->conf; + struct ieee80211_channel *channel = ch_switch->channel; + struct iwl_ht_config *ht_conf = &priv->current_ht_config; + + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + u16 ch; + unsigned long flags = 0; + + IWL_DEBUG_MAC80211(priv, "enter\n"); + + if (iwl_legacy_is_rfkill(priv)) + goto out_exit; + + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + goto out_exit; + + if (!iwl_legacy_is_associated_ctx(ctx)) + goto out_exit; + + /* channel switch in progress */ + if (priv->switch_rxon.switch_in_progress == true) + goto out_exit; + + mutex_lock(&priv->mutex); + if (priv->cfg->ops->lib->set_channel_switch) { + + ch = channel->hw_value; + if (le16_to_cpu(ctx->active.channel) != ch) { + ch_info = iwl_legacy_get_channel_info(priv, + channel->band, + ch); + if (!iwl_legacy_is_channel_valid(ch_info)) { + IWL_DEBUG_MAC80211(priv, "invalid channel\n"); + goto out; + } + spin_lock_irqsave(&priv->lock, flags); + + priv->current_ht_config.smps = conf->smps_mode; + + /* Configure HT40 channels */ + ctx->ht.enabled = conf_is_ht(conf); + if (ctx->ht.enabled) { + if (conf_is_ht40_minus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_BELOW; + ctx->ht.is_40mhz = true; + } else if (conf_is_ht40_plus(conf)) { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_ABOVE; + ctx->ht.is_40mhz = true; + } else { + ctx->ht.extension_chan_offset = + IEEE80211_HT_PARAM_CHA_SEC_NONE; + ctx->ht.is_40mhz = false; + } + } else + ctx->ht.is_40mhz = false; + + if ((le16_to_cpu(ctx->staging.channel) != ch)) + ctx->staging.flags = 0; + + iwl_legacy_set_rxon_channel(priv, channel, ctx); + iwl_legacy_set_rxon_ht(priv, ht_conf); + iwl_legacy_set_flags_for_band(priv, ctx, channel->band, + ctx->vif); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl_legacy_set_rate(priv); + /* + * at this point, staging_rxon has the + * configuration for channel switch + */ + if (priv->cfg->ops->lib->set_channel_switch(priv, + ch_switch)) + priv->switch_rxon.switch_in_progress = false; + } + } +out: + mutex_unlock(&priv->mutex); +out_exit: + if (!priv->switch_rxon.switch_in_progress) + ieee80211_chswitch_done(ctx->vif, false); + IWL_DEBUG_MAC80211(priv, "leave\n"); +} + +void iwl4965_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + struct iwl_rxon_context *ctx; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */ + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + 
mutex_lock(&priv->mutex); + + for_each_context(priv, ctx) { + ctx->staging.filter_flags &= ~filter_nand; + ctx->staging.filter_flags |= filter_or; + + /* + * Not committing directly because hardware can perform a scan, + * but we'll eventually commit the filter flags change anyway. + */ + } + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_legacy_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. + */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + +/***************************************************************************** + * + * driver setup and teardown + * + *****************************************************************************/ + +static void iwl4965_bg_txpower_work(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + txpower_work); + + /* If a scan happened to start before we got here + * then just return; the statistics notification will + * kick off another scheduled work to compensate for + * any temperature delta we missed here. */ + if (test_bit(STATUS_EXIT_PENDING, &priv->status) || + test_bit(STATUS_SCANNING, &priv->status)) + return; + + mutex_lock(&priv->mutex); + + /* Regardless of if we are associated, we must reconfigure the + * TX power since frames can be sent on non-radar channels while + * not associated */ + priv->cfg->ops->lib->send_tx_power(priv); + + /* Update last_temperature to keep is_calib_needed from running + * when it isn't needed... */ + priv->last_temperature = priv->temperature; + + mutex_unlock(&priv->mutex); +} + +static void iwl4965_setup_deferred_work(struct iwl_priv *priv) +{ + priv->workqueue = create_singlethread_workqueue(DRV_NAME); + + init_waitqueue_head(&priv->wait_command_queue); + + INIT_WORK(&priv->restart, iwl4965_bg_restart); + INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish); + INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work); + INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start); + INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start); + + iwl_legacy_setup_scan_deferred_work(priv); + + INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); + + init_timer(&priv->statistics_periodic); + priv->statistics_periodic.data = (unsigned long)priv; + priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; + + init_timer(&priv->ucode_trace); + priv->ucode_trace.data = (unsigned long)priv; + priv->ucode_trace.function = iwl4965_bg_ucode_trace; + + init_timer(&priv->watchdog); + priv->watchdog.data = (unsigned long)priv; + priv->watchdog.function = iwl_legacy_bg_watchdog; + + tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) + iwl4965_irq_tasklet, (unsigned long)priv); +} + +static void iwl4965_cancel_deferred_work(struct iwl_priv *priv) +{ + cancel_work_sync(&priv->txpower_work); + cancel_delayed_work_sync(&priv->init_alive_start); + cancel_delayed_work(&priv->alive_start); + cancel_work_sync(&priv->run_time_calib_work); + + iwl_legacy_cancel_scan_deferred_work(priv); + + del_timer_sync(&priv->statistics_periodic); + del_timer_sync(&priv->ucode_trace); +} + +static void iwl4965_init_hw_rates(struct iwl_priv *priv, + struct ieee80211_rate *rates) +{ + int i; + + for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { + rates[i].bitrate = iwlegacy_rates[i].ieee * 5; + rates[i].hw_value = i; /* Rate scaling will work on 
indexes */ + rates[i].hw_value_short = i; + rates[i].flags = 0; + if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) { + /* + * If CCK != 1M then set short preamble rate flag. + */ + rates[i].flags |= + (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ? + 0 : IEEE80211_RATE_SHORT_PREAMBLE; + } + } +} +/* + * Acquire priv->lock before calling this function ! + */ +void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index) +{ + iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index); +} + +void iwl4965_tx_queue_set_status(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + int tx_fifo_id, int scd_retry) +{ + int txq_id = txq->q.id; + + /* Find out whether to activate Tx queue */ + int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; + + /* Set up and activate */ + iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id), + (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) | + (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) | + (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) | + IWL49_SCD_QUEUE_STTS_REG_MSK); + + txq->sched_retry = scd_retry; + + IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n", + active ? "Activate" : "Deactivate", + scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); +} + + +static int iwl4965_init_drv(struct iwl_priv *priv) +{ + int ret; + + spin_lock_init(&priv->sta_lock); + spin_lock_init(&priv->hcmd_lock); + + INIT_LIST_HEAD(&priv->free_frames); + + mutex_init(&priv->mutex); + mutex_init(&priv->sync_cmd_mutex); + + priv->ieee_channels = NULL; + priv->ieee_rates = NULL; + priv->band = IEEE80211_BAND_2GHZ; + + priv->iw_mode = NL80211_IFTYPE_STATION; + priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; + priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; + priv->_4965.agg_tids_count = 0; + + /* initialize force reset */ + priv->force_reset[IWL_RF_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_RF_RESET; + priv->force_reset[IWL_FW_RESET].reset_duration = + IWL_DELAY_NEXT_FORCE_FW_RELOAD; + + /* Choose which receivers/antennas to use */ + if (priv->cfg->ops->hcmd->set_rxon_chain) + priv->cfg->ops->hcmd->set_rxon_chain(priv, + &priv->contexts[IWL_RXON_CTX_BSS]); + + iwl_legacy_init_scan_params(priv); + + /* Set the tx_power_user_lmt to the lowest power level + * this value will get overwritten by channel max power avg + * from eeprom */ + priv->tx_power_user_lmt = IWL4965_TX_POWER_TARGET_POWER_MIN; + priv->tx_power_next = IWL4965_TX_POWER_TARGET_POWER_MIN; + + ret = iwl_legacy_init_channel_map(priv); + if (ret) { + IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); + goto err; + } + + ret = iwl_legacy_init_geos(priv); + if (ret) { + IWL_ERR(priv, "initializing geos failed: %d\n", ret); + goto err_free_channel_map; + } + iwl4965_init_hw_rates(priv, priv->ieee_rates); + + return 0; + +err_free_channel_map: + iwl_legacy_free_channel_map(priv); +err: + return ret; +} + +static void iwl4965_uninit_drv(struct iwl_priv *priv) +{ + iwl4965_calib_free_results(priv); + iwl_legacy_free_geos(priv); + iwl_legacy_free_channel_map(priv); + kfree(priv->scan_cmd); +} + +static void iwl4965_hw_detect(struct iwl_priv *priv) +{ + priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV); + priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG); + pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); + IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); +} + +static 
int iwl4965_set_hw_params(struct iwl_priv *priv) +{ + priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; + priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; + if (priv->cfg->mod_params->amsdu_size_8K) + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K); + else + priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); + + priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; + + if (priv->cfg->mod_params->disable_11n) + priv->cfg->sku &= ~IWL_SKU_N; + + /* Device-specific setup */ + return priv->cfg->ops->lib->set_hw_params(priv); +} + +static const u8 iwl4965_bss_ac_to_fifo[] = { + IWL_TX_FIFO_VO, + IWL_TX_FIFO_VI, + IWL_TX_FIFO_BE, + IWL_TX_FIFO_BK, +}; + +static const u8 iwl4965_bss_ac_to_queue[] = { + 0, 1, 2, 3, +}; + +static int +iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + int err = 0, i; + struct iwl_priv *priv; + struct ieee80211_hw *hw; + struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); + unsigned long flags; + u16 pci_cmd; + + /************************ + * 1. Allocating HW data + ************************/ + + hw = iwl_legacy_alloc_all(cfg); + if (!hw) { + err = -ENOMEM; + goto out; + } + priv = hw->priv; + /* At this point both hw and priv are allocated. */ + + /* + * The default context is always valid, + * more may be discovered when firmware + * is loaded. + */ + priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); + + for (i = 0; i < NUM_IWL_RXON_CTX; i++) + priv->contexts[i].ctxid = i; + + priv->contexts[IWL_RXON_CTX_BSS].always_active = true; + priv->contexts[IWL_RXON_CTX_BSS].is_active = true; + priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; + priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; + priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; + priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; + priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; + priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; + priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo; + priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue; + priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes = + BIT(NL80211_IFTYPE_ADHOC); + priv->contexts[IWL_RXON_CTX_BSS].interface_modes = + BIT(NL80211_IFTYPE_STATION); + priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP; + priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; + priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; + priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; + + BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1); + + SET_IEEE80211_DEV(hw, &pdev->dev); + + IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); + priv->cfg = cfg; + priv->pci_dev = pdev; + priv->inta_mask = CSR_INI_SET_MASK; + + if (iwl_legacy_alloc_traffic_mem(priv)) + IWL_ERR(priv, "Not enough memory to generate traffic log\n"); + + /************************** + * 2. 
Initializing PCI bus + **************************/ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + if (pci_enable_device(pdev)) { + err = -ENODEV; + goto out_ieee80211_free_hw; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (err) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!err) + err = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(32)); + /* both attempts failed: */ + if (err) { + IWL_WARN(priv, "No suitable DMA available.\n"); + goto out_pci_disable_device; + } + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) + goto out_pci_disable_device; + + pci_set_drvdata(pdev, priv); + + + /*********************** + * 3. Read REV register + ***********************/ + priv->hw_base = pci_iomap(pdev, 0, 0); + if (!priv->hw_base) { + err = -ENODEV; + goto out_pci_release_regions; + } + + IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", + (unsigned long long) pci_resource_len(pdev, 0)); + IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); + + /* these spin locks will be used in apm_ops.init and EEPROM access + * we should init now + */ + spin_lock_init(&priv->reg_lock); + spin_lock_init(&priv->lock); + + /* + * stop and reset the on-board processor just in case it is in a + * strange state ... like being left stranded by a primary kernel + * and this is now the kdump kernel trying to start up + */ + iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); + + iwl4965_hw_detect(priv); + IWL_INFO(priv, "Detected %s, REV=0x%X\n", + priv->cfg->name, priv->hw_rev); + + /* We disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state */ + pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + + iwl4965_prepare_card_hw(priv); + if (!priv->hw_ready) { + IWL_WARN(priv, "Failed, HW not ready\n"); + goto out_iounmap; + } + + /***************** + * 4. Read EEPROM + *****************/ + /* Read the EEPROM */ + err = iwl_legacy_eeprom_init(priv); + if (err) { + IWL_ERR(priv, "Unable to init EEPROM\n"); + goto out_iounmap; + } + err = iwl4965_eeprom_check_version(priv); + if (err) + goto out_free_eeprom; + + if (err) + goto out_free_eeprom; + + /* extract MAC Address */ + iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr); + IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); + priv->hw->wiphy->addresses = priv->addresses; + priv->hw->wiphy->n_addresses = 1; + + /************************ + * 5. Setup HW constants + ************************/ + if (iwl4965_set_hw_params(priv)) { + IWL_ERR(priv, "failed to set hw parameters\n"); + goto out_free_eeprom; + } + + /******************* + * 6. Setup priv + *******************/ + + err = iwl4965_init_drv(priv); + if (err) + goto out_free_eeprom; + /* At this point both hw and priv are initialized. */ + + /******************** + * 7. Setup services + ********************/ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + pci_enable_msi(priv->pci_dev); + + err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, + IRQF_SHARED, DRV_NAME, priv); + if (err) { + IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); + goto out_disable_msi; + } + + iwl4965_setup_deferred_work(priv); + iwl4965_setup_rx_handlers(priv); + + /********************************************* + * 8. 
Enable interrupts and read RFKILL state + *********************************************/ + + /* enable interrupts if needed: hw bug w/a */ + pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); + } + + iwl_legacy_enable_interrupts(priv); + + /* If platform's RF_KILL switch is NOT set to KILL */ + if (iwl_read32(priv, CSR_GP_CNTRL) & + CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) + clear_bit(STATUS_RF_KILL_HW, &priv->status); + else + set_bit(STATUS_RF_KILL_HW, &priv->status); + + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + + iwl_legacy_power_initialize(priv); + + init_completion(&priv->_4965.firmware_loading_complete); + + err = iwl4965_request_firmware(priv, true); + if (err) + goto out_destroy_workqueue; + + return 0; + + out_destroy_workqueue: + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + free_irq(priv->pci_dev->irq, priv); + out_disable_msi: + pci_disable_msi(priv->pci_dev); + iwl4965_uninit_drv(priv); + out_free_eeprom: + iwl_legacy_eeprom_free(priv); + out_iounmap: + pci_iounmap(pdev, priv->hw_base); + out_pci_release_regions: + pci_set_drvdata(pdev, NULL); + pci_release_regions(pdev); + out_pci_disable_device: + pci_disable_device(pdev); + out_ieee80211_free_hw: + iwl_legacy_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); + out: + return err; +} + +static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) +{ + struct iwl_priv *priv = pci_get_drvdata(pdev); + unsigned long flags; + + if (!priv) + return; + + wait_for_completion(&priv->_4965.firmware_loading_complete); + + IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); + + iwl_legacy_dbgfs_unregister(priv); + sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); + + /* ieee80211_unregister_hw call wil cause iwl_mac_stop to + * to be called and iwl4965_down since we are removing the device + * we need to set STATUS_EXIT_PENDING bit. + */ + set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_legacy_leds_exit(priv); + + if (priv->mac80211_registered) { + ieee80211_unregister_hw(priv->hw); + priv->mac80211_registered = 0; + } else { + iwl4965_down(priv); + } + + /* + * Make sure device is reset to low power before unloading driver. + * This may be redundant with iwl4965_down(), but there are paths to + * run iwl4965_down() without calling apm_ops.stop(), and there are + * paths to avoid running iwl4965_down() at all before leaving driver. + * This (inexpensive) call *makes sure* device is reset. + */ + iwl_legacy_apm_stop(priv); + + /* make sure we flush any pending irq or + * tasklet for the driver + */ + spin_lock_irqsave(&priv->lock, flags); + iwl_legacy_disable_interrupts(priv); + spin_unlock_irqrestore(&priv->lock, flags); + + iwl4965_synchronize_irq(priv); + + iwl4965_dealloc_ucode_pci(priv); + + if (priv->rxq.bd) + iwl4965_rx_queue_free(priv, &priv->rxq); + iwl4965_hw_txq_ctx_free(priv); + + iwl_legacy_eeprom_free(priv); + + + /*netif_stop_queue(dev); */ + flush_workqueue(priv->workqueue); + + /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes + * priv->workqueue... so we can't take down the workqueue + * until now... 
*/ + destroy_workqueue(priv->workqueue); + priv->workqueue = NULL; + iwl_legacy_free_traffic_mem(priv); + + free_irq(priv->pci_dev->irq, priv); + pci_disable_msi(priv->pci_dev); + pci_iounmap(pdev, priv->hw_base); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + + iwl4965_uninit_drv(priv); + + dev_kfree_skb(priv->beacon_skb); + + ieee80211_free_hw(priv->hw); +} + +/* + * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask + * must be called under priv->lock and mac access + */ +void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask) +{ + iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask); +} + +/***************************************************************************** + * + * driver and module entry point + * + *****************************************************************************/ + +/* Hardware specific file defines the PCI IDs table for that hardware module */ +static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = { +#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965) + {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)}, + {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)}, +#endif /* CONFIG_IWL4965 */ + + {0} +}; +MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids); + +static struct pci_driver iwl4965_driver = { + .name = DRV_NAME, + .id_table = iwl4965_hw_card_ids, + .probe = iwl4965_pci_probe, + .remove = __devexit_p(iwl4965_pci_remove), + .driver.pm = IWL_LEGACY_PM_OPS, +}; + +static int __init iwl4965_init(void) +{ + + int ret; + pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); + pr_info(DRV_COPYRIGHT "\n"); + + ret = iwl4965_rate_control_register(); + if (ret) { + pr_err("Unable to register rate control algorithm: %d\n", ret); + return ret; + } + + ret = pci_register_driver(&iwl4965_driver); + if (ret) { + pr_err("Unable to initialize PCI module\n"); + goto error_register; + } + + return ret; + +error_register: + iwl4965_rate_control_unregister(); + return ret; +} + +static void __exit iwl4965_exit(void) +{ + pci_unregister_driver(&iwl4965_driver); + iwl4965_rate_control_unregister(); +} + +module_exit(iwl4965_exit); +module_init(iwl4965_init); + +#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG +module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "debug output mask"); +#endif + +module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO); +MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); +module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO); +MODULE_PARM_DESC(queues_num, "number of hw queues."); +module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO); +MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); +module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, + int, S_IRUGO); +MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); +module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO); +MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index ed424574160e..17d555f2215a 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig @@ -1,14 +1,52 @@ -config IWLWIFI - tristate "Intel Wireless Wifi" +config IWLAGN + tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlagn) " depends on PCI && MAC80211 select FW_LOADER + select NEW_LEDS + select LEDS_CLASS + select LEDS_TRIGGERS + select 
MAC80211_LEDS + ---help--- + Select to build the driver supporting the: + + Intel Wireless WiFi Link Next-Gen AGN + + This option enables support for use with the following hardware: + Intel Wireless WiFi Link 6250AGN Adapter + Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN) + Intel WiFi Link 1000BGN + Intel Wireless WiFi 5150AGN + Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN + Intel 6005 Series Wi-Fi Adapters + Intel 6030 Series Wi-Fi Adapters + Intel Wireless WiFi Link 6150BGN 2 Adapter + Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) + Intel 2000 Series Wi-Fi Adapters + + + This driver uses the kernel's mac80211 subsystem. + + In order to use this driver, you will need a microcode (uCode) + image for it. You can obtain the microcode from: + + <http://intellinuxwireless.org/>. + + The microcode is typically installed in /lib/firmware. You can + look in the hotplug script /etc/hotplug/firmware.agent to + determine which directory FIRMWARE_DIR is set to when the script + runs. + + If you want to compile the driver as a module ( = code which can be + inserted in and removed from the running kernel whenever you want), + say M here and read <file:Documentation/kbuild/modules.txt>. The + module will be called iwlagn. menu "Debugging Options" - depends on IWLWIFI + depends on IWLAGN config IWLWIFI_DEBUG - bool "Enable full debugging output in iwlagn and iwl3945 drivers" - depends on IWLWIFI + bool "Enable full debugging output in the iwlagn driver" + depends on IWLAGN ---help--- This option will enable debug tracing output for the iwlwifi drivers @@ -33,7 +71,7 @@ config IWLWIFI_DEBUG config IWLWIFI_DEBUGFS bool "iwlagn debugfs support" - depends on IWLWIFI && MAC80211_DEBUGFS + depends on IWLAGN && MAC80211_DEBUGFS ---help--- Enable creation of debugfs files for the iwlwifi drivers. This is a low-impact option that allows getting insight into the @@ -41,13 +79,13 @@ config IWLWIFI_DEBUGFS config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE bool "Experimental uCode support" - depends on IWLWIFI && IWLWIFI_DEBUG + depends on IWLAGN && IWLWIFI_DEBUG ---help--- Enable use of experimental ucode for testing and debugging. config IWLWIFI_DEVICE_TRACING bool "iwlwifi device access tracing" - depends on IWLWIFI + depends on IWLAGN depends on EVENT_TRACING help Say Y here to trace all commands, including TX frames and IO @@ -64,73 +102,19 @@ config IWLWIFI_DEVICE_TRACING occur. endmenu -config IWLAGN - tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)" - depends on IWLWIFI - ---help--- - Select to build the driver supporting the: - - Intel Wireless WiFi Link Next-Gen AGN - - This driver uses the kernel's mac80211 subsystem. - - In order to use this driver, you will need a microcode (uCode) - image for it. You can obtain the microcode from: - - <http://intellinuxwireless.org/>. - - The microcode is typically installed in /lib/firmware. You can - look in the hotplug script /etc/hotplug/firmware.agent to - determine which directory FIRMWARE_DIR is set to when the script - runs. - - If you want to compile the driver as a module ( = code which can be - inserted in and removed from the running kernel whenever you want), - say M here and read <file:Documentation/kbuild/modules.txt>. The - module will be called iwlagn. 
- - -config IWL4965 - bool "Intel Wireless WiFi 4965AGN" - depends on IWLAGN - ---help--- - This option enables support for Intel Wireless WiFi Link 4965AGN - -config IWL5000 - bool "Intel Wireless-N/Advanced-N/Ultimate-N WiFi Link" +config IWL_P2P + bool "iwlwifi experimental P2P support" depends on IWLAGN - ---help--- - This option enables support for use with the following hardware: - Intel Wireless WiFi Link 6250AGN Adapter - Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN) - Intel WiFi Link 1000BGN - Intel Wireless WiFi 5150AGN - Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN - Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B) - Intel WIreless WiFi Link 6050BGN Gen 2 Adapter - Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN) - -config IWL3945 - tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" - depends on IWLWIFI - ---help--- - Select to build the driver supporting the: - - Intel PRO/Wireless 3945ABG/BG Network Connection - - This driver uses the kernel's mac80211 subsystem. - - In order to use this driver, you will need a microcode (uCode) - image for it. You can obtain the microcode from: + help + This option enables experimental P2P support for some devices + based on microcode support. Since P2P support is still under + development, this option may even enable it for some devices + now that turn out to not support it in the future due to + microcode restrictions. - <http://intellinuxwireless.org/>. + To determine if your microcode supports the experimental P2P + offered by this option, check if the driver advertises AP + support when it is loaded. - The microcode is typically installed in /lib/firmware. You can - look in the hotplug script /etc/hotplug/firmware.agent to - determine which directory FIRMWARE_DIR is set to when the script - runs. + Say Y only if you want to experiment with P2P. - If you want to compile the driver as a module ( = code which can be - inserted in and removed from the running kernel whenever you want), - say M here and read <file:Documentation/kbuild/modules.txt>. The - module will be called iwl3945. diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile index 93380f97835f..9d6ee836426c 100644 --- a/drivers/net/wireless/iwlwifi/Makefile +++ b/drivers/net/wireless/iwlwifi/Makefile @@ -1,35 +1,23 @@ -obj-$(CONFIG_IWLWIFI) += iwlcore.o -iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o -iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o -iwlcore-objs += iwl-scan.o iwl-led.o -iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o -iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o -iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o -iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o - -# If 3945 is selected only, iwl-legacy.o will be added -# to iwlcore-m above, but it needs to be built in. 
-iwlcore-objs += $(iwlcore-m) - -CFLAGS_iwl-devtrace.o := -I$(src) - # AGN obj-$(CONFIG_IWLAGN) += iwlagn.o iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o -iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o +iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o -iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o -iwlagn-$(CONFIG_IWL4965) += iwl-4965.o -iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o -iwlagn-$(CONFIG_IWL5000) += iwl-5000.o -iwlagn-$(CONFIG_IWL5000) += iwl-6000.o -iwlagn-$(CONFIG_IWL5000) += iwl-1000.o +iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o +iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o +iwlagn-objs += iwl-scan.o iwl-led.o +iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o +iwlagn-objs += iwl-5000.o +iwlagn-objs += iwl-6000.o +iwlagn-objs += iwl-1000.o +iwlagn-objs += iwl-2000.o + +iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o +iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o +iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o -# 3945 -obj-$(CONFIG_IWL3945) += iwl3945.o -iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o -iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o +CFLAGS_iwl-devtrace.o := -I$(src) ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index ba78bc8a259f..27c5007e577c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -49,7 +49,7 @@ #include "iwl-agn-debugfs.h" /* Highest firmware API version supported */ -#define IWL1000_UCODE_API_MAX 3 +#define IWL1000_UCODE_API_MAX 5 #define IWL100_UCODE_API_MAX 5 /* Lowest firmware API version supported */ @@ -232,8 +232,6 @@ static struct iwl_lib_ops iwl1000_lib = { .bt_stats_read = iwl_ucode_bt_stats_read, .reply_tx_error = iwl_reply_tx_error_read, }, - .check_plcp_health = iwl_good_plcp_health, - .check_ack_health = iwl_good_ack_health, .txfifo_flush = iwlagn_txfifo_flush, .dev_txfifo_flush = iwlagn_dev_txfifo_flush, .tt_ops = { diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c new file mode 100644 index 000000000000..d7b6126408c9 --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -0,0 +1,560 @@ +/****************************************************************************** + * + * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * Intel Linux Wireless <ilw@linux.intel.com> + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + *****************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/wireless.h> +#include <net/mac80211.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include "iwl-eeprom.h" +#include "iwl-dev.h" +#include "iwl-core.h" +#include "iwl-io.h" +#include "iwl-sta.h" +#include "iwl-agn.h" +#include "iwl-helpers.h" +#include "iwl-agn-hw.h" +#include "iwl-6000-hw.h" +#include "iwl-agn-led.h" +#include "iwl-agn-debugfs.h" + +/* Highest firmware API version supported */ +#define IWL2030_UCODE_API_MAX 5 +#define IWL2000_UCODE_API_MAX 5 +#define IWL200_UCODE_API_MAX 5 + +/* Lowest firmware API version supported */ +#define IWL2030_UCODE_API_MIN 5 +#define IWL2000_UCODE_API_MIN 5 +#define IWL200_UCODE_API_MIN 5 + +#define IWL2030_FW_PRE "iwlwifi-2030-" +#define _IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode" +#define IWL2030_MODULE_FIRMWARE(api) _IWL2030_MODULE_FIRMWARE(api) + +#define IWL2000_FW_PRE "iwlwifi-2000-" +#define _IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode" +#define IWL2000_MODULE_FIRMWARE(api) _IWL2000_MODULE_FIRMWARE(api) + +#define IWL200_FW_PRE "iwlwifi-200-" +#define _IWL200_MODULE_FIRMWARE(api) IWL200_FW_PRE #api ".ucode" +#define IWL200_MODULE_FIRMWARE(api) _IWL200_MODULE_FIRMWARE(api) + +static void iwl2000_set_ct_threshold(struct iwl_priv *priv) +{ + /* want Celsius */ + priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; + priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; +} + +/* NIC configuration for 2000 series */ +static void iwl2000_nic_config(struct iwl_priv *priv) +{ + u16 radio_cfg; + + radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); + + /* write radio config values to register */ + if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | + EEPROM_RF_CFG_STEP_MSK(radio_cfg) | + EEPROM_RF_CFG_DASH_MSK(radio_cfg)); + + /* set CSR_HW_CONFIG_REG for uCode use */ + iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | + CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); + + if (priv->cfg->iq_invert) + iwl_set_bit(priv, CSR_GP_DRIVER_REG, + CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); + +} + +static struct iwl_sensitivity_ranges iwl2000_sensitivity = { + .min_nrg_cck = 97, + .max_nrg_cck = 0, /* not used, set to 0 */ + .auto_corr_min_ofdm = 80, + .auto_corr_min_ofdm_mrc = 128, + .auto_corr_min_ofdm_x1 = 105, + .auto_corr_min_ofdm_mrc_x1 = 192, + + .auto_corr_max_ofdm = 145, + .auto_corr_max_ofdm_mrc = 232, + .auto_corr_max_ofdm_x1 = 110, + .auto_corr_max_ofdm_mrc_x1 = 232, + + .auto_corr_min_cck = 125, + .auto_corr_max_cck = 175, + .auto_corr_min_cck_mrc = 160, + .auto_corr_max_cck_mrc = 310, + .nrg_th_cck = 97, + .nrg_th_ofdm = 100, + + .barker_corr_th_min = 190, + .barker_corr_th_min_mrc = 390, + .nrg_th_cca = 62, +}; + +static int iwl2000_hw_set_hw_params(struct iwl_priv *priv) +{ + if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && + priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES) + priv->cfg->base_params->num_of_queues = + priv->cfg->mod_params->num_of_queues; + + priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; + priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; + 
priv->hw_params.scd_bc_tbls_size = + priv->cfg->base_params->num_of_queues * + sizeof(struct iwlagn_scd_bc_tbl); + priv->hw_params.tfd_size = sizeof(struct iwl_tfd); + priv->hw_params.max_stations = IWLAGN_STATION_COUNT; + priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; + + priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; + priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; + + priv->hw_params.max_bsm_size = 0; + priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | + BIT(IEEE80211_BAND_5GHZ); + priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; + + priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); + if (priv->cfg->rx_with_siso_diversity) + priv->hw_params.rx_chains_num = 1; + else + priv->hw_params.rx_chains_num = + num_of_ant(priv->cfg->valid_rx_ant); + priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; + priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; + + iwl2000_set_ct_threshold(priv); + + /* Set initial sensitivity parameters */ + /* Set initial calibration set */ + priv->hw_params.sens = &iwl2000_sensitivity; + priv->hw_params.calib_init_cfg = + BIT(IWL_CALIB_XTAL) | + BIT(IWL_CALIB_LO) | + BIT(IWL_CALIB_TX_IQ) | + BIT(IWL_CALIB_BASE_BAND); + if (priv->cfg->need_dc_calib) + priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX); + if (priv->cfg->need_temp_offset_calib) + priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); + + priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; + + return 0; +} + +static int iwl2030_hw_channel_switch(struct iwl_priv *priv, + struct ieee80211_channel_switch *ch_switch) +{ + /* + * MULTI-FIXME + * See iwl_mac_channel_switch. + */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl6000_channel_switch_cmd cmd; + const struct iwl_channel_info *ch_info; + u32 switch_time_in_usec, ucode_switch_time; + u16 ch; + u32 tsf_low; + u8 switch_count; + u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval); + struct ieee80211_vif *vif = ctx->vif; + struct iwl_host_cmd hcmd = { + .id = REPLY_CHANNEL_SWITCH, + .len = sizeof(cmd), + .flags = CMD_SYNC, + .data = &cmd, + }; + + cmd.band = priv->band == IEEE80211_BAND_2GHZ; + ch = ch_switch->channel->hw_value; + IWL_DEBUG_11H(priv, "channel switch from %u to %u\n", + ctx->active.channel, ch); + cmd.channel = cpu_to_le16(ch); + cmd.rxon_flags = ctx->staging.flags; + cmd.rxon_filter_flags = ctx->staging.filter_flags; + switch_count = ch_switch->count; + tsf_low = ch_switch->timestamp & 0x0ffffffff; + /* + * calculate the ucode channel switch time + * adding TSF as one of the factor for when to switch + */ + if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) { + if (switch_count > ((priv->ucode_beacon_time - tsf_low) / + beacon_interval)) { + switch_count -= (priv->ucode_beacon_time - + tsf_low) / beacon_interval; + } else + switch_count = 0; + } + if (switch_count <= 1) + cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); + else { + switch_time_in_usec = + vif->bss_conf.beacon_int * switch_count * TIME_UNIT; + ucode_switch_time = iwl_usecs_to_beacons(priv, + switch_time_in_usec, + beacon_interval); + cmd.switch_time = iwl_add_beacon_time(priv, + priv->ucode_beacon_time, + ucode_switch_time, + beacon_interval); + } + IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n", + cmd.switch_time); + ch_info = iwl_get_channel_info(priv, priv->band, ch); + if (ch_info) + cmd.expect_beacon = is_channel_radar(ch_info); + else { + IWL_ERR(priv, "invalid channel switch from %u to %u\n", + 
ctx->active.channel, ch); + return -EFAULT; + } + priv->switch_rxon.channel = cmd.channel; + priv->switch_rxon.switch_in_progress = true; + + return iwl_send_cmd_sync(priv, &hcmd); +} + +static struct iwl_lib_ops iwl2000_lib = { + .set_hw_params = iwl2000_hw_set_hw_params, + .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, + .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, + .txq_set_sched = iwlagn_txq_set_sched, + .txq_agg_enable = iwlagn_txq_agg_enable, + .txq_agg_disable = iwlagn_txq_agg_disable, + .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, + .txq_free_tfd = iwl_hw_txq_free_tfd, + .txq_init = iwl_hw_tx_queue_init, + .rx_handler_setup = iwlagn_rx_handler_setup, + .setup_deferred_work = iwlagn_bt_setup_deferred_work, + .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, + .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, + .load_ucode = iwlagn_load_ucode, + .dump_nic_event_log = iwl_dump_nic_event_log, + .dump_nic_error_log = iwl_dump_nic_error_log, + .dump_csr = iwl_dump_csr, + .dump_fh = iwl_dump_fh, + .init_alive_start = iwlagn_init_alive_start, + .alive_notify = iwlagn_alive_notify, + .send_tx_power = iwlagn_send_tx_power, + .update_chain_flags = iwl_update_chain_flags, + .set_channel_switch = iwl2030_hw_channel_switch, + .apm_ops = { + .init = iwl_apm_init, + .config = iwl2000_nic_config, + }, + .eeprom_ops = { + .regulatory_bands = { + EEPROM_REG_BAND_1_CHANNELS, + EEPROM_REG_BAND_2_CHANNELS, + EEPROM_REG_BAND_3_CHANNELS, + EEPROM_REG_BAND_4_CHANNELS, + EEPROM_REG_BAND_5_CHANNELS, + EEPROM_6000_REG_BAND_24_HT40_CHANNELS, + EEPROM_REG_BAND_52_HT40_CHANNELS + }, + .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, + .release_semaphore = iwlcore_eeprom_release_semaphore, + .calib_version = iwlagn_eeprom_calib_version, + .query_addr = iwlagn_eeprom_query_addr, + .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, + }, + .isr_ops = { + .isr = iwl_isr_ict, + .free = iwl_free_isr_ict, + .alloc = iwl_alloc_isr_ict, + .reset = iwl_reset_ict, + .disable = iwl_disable_ict, + }, + .temp_ops = { + .temperature = iwlagn_temperature, + }, + .debugfs_ops = { + .rx_stats_read = iwl_ucode_rx_stats_read, + .tx_stats_read = iwl_ucode_tx_stats_read, + .general_stats_read = iwl_ucode_general_stats_read, + .bt_stats_read = iwl_ucode_bt_stats_read, + .reply_tx_error = iwl_reply_tx_error_read, + }, + .txfifo_flush = iwlagn_txfifo_flush, + .dev_txfifo_flush = iwlagn_dev_txfifo_flush, + .tt_ops = { + .lower_power_detection = iwl_tt_is_low_power_state, + .tt_power_mode = iwl_tt_current_power_mode, + .ct_kill_check = iwl_check_for_ct_kill, + } +}; + +static const struct iwl_ops iwl2000_ops = { + .lib = &iwl2000_lib, + .hcmd = &iwlagn_hcmd, + .utils = &iwlagn_hcmd_utils, + .led = &iwlagn_led_ops, + .ieee80211_ops = &iwlagn_hw_ops, +}; + +static const struct iwl_ops iwl2030_ops = { + .lib = &iwl2000_lib, + .hcmd = &iwlagn_bt_hcmd, + .utils = &iwlagn_hcmd_utils, + .led = &iwlagn_led_ops, + .ieee80211_ops = &iwlagn_hw_ops, +}; + +static const struct iwl_ops iwl200_ops = { + .lib = &iwl2000_lib, + .hcmd = &iwlagn_hcmd, + .utils = &iwlagn_hcmd_utils, + .led = &iwlagn_led_ops, + .ieee80211_ops = &iwlagn_hw_ops, +}; + +static const struct iwl_ops iwl230_ops = { + .lib = &iwl2000_lib, + .hcmd = &iwlagn_bt_hcmd, + .utils = &iwlagn_hcmd_utils, + .led = &iwlagn_led_ops, + .ieee80211_ops = &iwlagn_hw_ops, +}; + +static struct iwl_base_params iwl2000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = 
IWLAGN_NUM_AMPDU_QUEUES, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .max_ll_items = OTP_MAX_LL_ITEMS_2x00, + .shadow_ram_support = true, + .led_compensation = 51, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .wd_timeout = IWL_DEF_WD_TIMEOUT, + .max_event_log_size = 512, + .ucode_tracing = true, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .shadow_reg_enable = true, +}; + + +static struct iwl_base_params iwl2030_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE, + .num_of_queues = IWLAGN_NUM_QUEUES, + .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, + .pll_cfg_val = 0, + .set_l0s = true, + .use_bsm = false, + .max_ll_items = OTP_MAX_LL_ITEMS_2x00, + .shadow_ram_support = true, + .led_compensation = 57, + .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, + .adv_thermal_throttle = true, + .support_ct_kill_exit = true, + .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, + .chain_noise_scale = 1000, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .ucode_tracing = true, + .sensitivity_calib_by_driver = true, + .chain_noise_calib_by_driver = true, + .shadow_reg_enable = true, +}; + +static struct iwl_ht_params iwl2000_ht_params = { + .ht_greenfield_support = true, + .use_rts_for_aggregation = true, /* use rts/cts protection */ +}; + +static struct iwl_bt_params iwl2030_bt_params = { + .bt_statistics = true, + /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ + .advanced_bt_coexist = true, + .agg_time_limit = BT_AGG_THRESHOLD_DEF, + .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, + .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, + .bt_sco_disable = true, + .bt_session_2 = true, +}; + +#define IWL_DEVICE_2000 \ + .fw_name_pre = IWL2000_FW_PRE, \ + .ucode_api_max = IWL2000_UCODE_API_MAX, \ + .ucode_api_min = IWL2000_UCODE_API_MIN, \ + .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ + .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .ops = &iwl2000_ops, \ + .mod_params = &iwlagn_mod_params, \ + .base_params = &iwl2000_base_params, \ + .need_dc_calib = true, \ + .need_temp_offset_calib = true, \ + .led_mode = IWL_LED_RF_STATE, \ + .iq_invert = true \ + +struct iwl_cfg iwl2000_2bgn_cfg = { + .name = "2000 Series 2x2 BGN", + IWL_DEVICE_2000, + .ht_params = &iwl2000_ht_params, +}; + +struct iwl_cfg iwl2000_2bg_cfg = { + .name = "2000 Series 2x2 BG", + IWL_DEVICE_2000, +}; + +#define IWL_DEVICE_2030 \ + .fw_name_pre = IWL2030_FW_PRE, \ + .ucode_api_max = IWL2030_UCODE_API_MAX, \ + .ucode_api_min = IWL2030_UCODE_API_MIN, \ + .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ + .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .ops = &iwl2030_ops, \ + .mod_params = &iwlagn_mod_params, \ + .base_params = &iwl2030_base_params, \ + .bt_params = &iwl2030_bt_params, \ + .need_dc_calib = true, \ + .need_temp_offset_calib = true, \ + .led_mode = IWL_LED_RF_STATE, \ + .adv_pm = true, \ + .iq_invert = true \ + +struct iwl_cfg iwl2030_2bgn_cfg = { + .name = "2000 Series 2x2 BGN/BT", + IWL_DEVICE_2030, + .ht_params = &iwl2000_ht_params, +}; + +struct iwl_cfg iwl2030_2bg_cfg = { + .name = "2000 Series 2x2 BG/BT", + IWL_DEVICE_2030, +}; + +#define IWL_DEVICE_6035 \ + .fw_name_pre = IWL2030_FW_PRE, \ + .ucode_api_max = IWL2030_UCODE_API_MAX, \ + .ucode_api_min = IWL2030_UCODE_API_MIN, \ + .eeprom_ver = EEPROM_6035_EEPROM_VERSION, \ + .eeprom_calib_ver = 
EEPROM_6035_TX_POWER_VERSION, \ + .ops = &iwl2030_ops, \ + .mod_params = &iwlagn_mod_params, \ + .base_params = &iwl2030_base_params, \ + .bt_params = &iwl2030_bt_params, \ + .need_dc_calib = true, \ + .need_temp_offset_calib = true, \ + .led_mode = IWL_LED_RF_STATE, \ + .adv_pm = true \ + +struct iwl_cfg iwl6035_2agn_cfg = { + .name = "2000 Series 2x2 AGN/BT", + IWL_DEVICE_6035, + .ht_params = &iwl2000_ht_params, +}; + +struct iwl_cfg iwl6035_2abg_cfg = { + .name = "2000 Series 2x2 ABG/BT", + IWL_DEVICE_6035, +}; + +struct iwl_cfg iwl6035_2bg_cfg = { + .name = "2000 Series 2x2 BG/BT", + IWL_DEVICE_6035, +}; + +#define IWL_DEVICE_200 \ + .fw_name_pre = IWL200_FW_PRE, \ + .ucode_api_max = IWL200_UCODE_API_MAX, \ + .ucode_api_min = IWL200_UCODE_API_MIN, \ + .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ + .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .ops = &iwl200_ops, \ + .mod_params = &iwlagn_mod_params, \ + .base_params = &iwl2000_base_params, \ + .need_dc_calib = true, \ + .need_temp_offset_calib = true, \ + .led_mode = IWL_LED_RF_STATE, \ + .adv_pm = true, \ + .rx_with_siso_diversity = true \ + +struct iwl_cfg iwl200_bg_cfg = { + .name = "200 Series 1x1 BG", + IWL_DEVICE_200, +}; + +struct iwl_cfg iwl200_bgn_cfg = { + .name = "200 Series 1x1 BGN", + IWL_DEVICE_200, + .ht_params = &iwl2000_ht_params, +}; + +#define IWL_DEVICE_230 \ + .fw_name_pre = IWL200_FW_PRE, \ + .ucode_api_max = IWL200_UCODE_API_MAX, \ + .ucode_api_min = IWL200_UCODE_API_MIN, \ + .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ + .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ + .ops = &iwl230_ops, \ + .mod_params = &iwlagn_mod_params, \ + .base_params = &iwl2030_base_params, \ + .bt_params = &iwl2030_bt_params, \ + .need_dc_calib = true, \ + .need_temp_offset_calib = true, \ + .led_mode = IWL_LED_RF_STATE, \ + .adv_pm = true, \ + .rx_with_siso_diversity = true \ + +struct iwl_cfg iwl230_bg_cfg = { + .name = "200 Series 1x1 BG/BT", + IWL_DEVICE_230, +}; + +struct iwl_cfg iwl230_bgn_cfg = { + .name = "200 Series 1x1 BGN/BT", + IWL_DEVICE_230, + .ht_params = &iwl2000_ht_params, +}; + +MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL200_MODULE_FIRMWARE(IWL200_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 537fb8c84e3a..3ea31b659d1a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -402,8 +402,6 @@ static struct iwl_lib_ops iwl5000_lib = { .bt_stats_read = iwl_ucode_bt_stats_read, .reply_tx_error = iwl_reply_tx_error_read, }, - .check_plcp_health = iwl_good_plcp_health, - .check_ack_health = iwl_good_ack_health, .txfifo_flush = iwlagn_txfifo_flush, .dev_txfifo_flush = iwlagn_dev_txfifo_flush, .tt_ops = { @@ -471,8 +469,6 @@ static struct iwl_lib_ops iwl5150_lib = { .bt_stats_read = iwl_ucode_bt_stats_read, .reply_tx_error = iwl_reply_tx_error_read, }, - .check_plcp_health = iwl_good_plcp_health, - .check_ack_health = iwl_good_ack_health, .txfifo_flush = iwlagn_txfifo_flush, .dev_txfifo_flush = iwlagn_dev_txfifo_flush, .tt_ops = { diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index ef36aff1bb43..a745b01c0ec1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -67,13 +67,13 @@ #define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode" #define IWL6050_MODULE_FIRMWARE(api) 
_IWL6050_MODULE_FIRMWARE(api) -#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-" -#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode" -#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api) +#define IWL6005_FW_PRE "iwlwifi-6000g2a-" +#define _IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode" +#define IWL6005_MODULE_FIRMWARE(api) _IWL6005_MODULE_FIRMWARE(api) -#define IWL6000G2B_FW_PRE "iwlwifi-6000g2b-" -#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode" -#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api) +#define IWL6030_FW_PRE "iwlwifi-6000g2b-" +#define _IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode" +#define IWL6030_MODULE_FIRMWARE(api) _IWL6030_MODULE_FIRMWARE(api) static void iwl6000_set_ct_threshold(struct iwl_priv *priv) { @@ -90,7 +90,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv) CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); } -static void iwl6050g2_additional_nic_config(struct iwl_priv *priv) +static void iwl6150_additional_nic_config(struct iwl_priv *priv) { /* Indicate calibration version to uCode. */ if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6) @@ -343,8 +343,6 @@ static struct iwl_lib_ops iwl6000_lib = { .bt_stats_read = iwl_ucode_bt_stats_read, .reply_tx_error = iwl_reply_tx_error_read, }, - .check_plcp_health = iwl_good_plcp_health, - .check_ack_health = iwl_good_ack_health, .txfifo_flush = iwlagn_txfifo_flush, .dev_txfifo_flush = iwlagn_dev_txfifo_flush, .tt_ops = { @@ -354,7 +352,7 @@ static struct iwl_lib_ops iwl6000_lib = { } }; -static struct iwl_lib_ops iwl6000g2b_lib = { +static struct iwl_lib_ops iwl6030_lib = { .set_hw_params = iwl6000_hw_set_hw_params, .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl, .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl, @@ -415,8 +413,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = { .bt_stats_read = iwl_ucode_bt_stats_read, .reply_tx_error = iwl_reply_tx_error_read, }, - .check_plcp_health = iwl_good_plcp_health, - .check_ack_health = iwl_good_ack_health, .txfifo_flush = iwlagn_txfifo_flush, .dev_txfifo_flush = iwlagn_dev_txfifo_flush, .tt_ops = { @@ -430,8 +426,8 @@ static struct iwl_nic_ops iwl6050_nic_ops = { .additional_nic_config = &iwl6050_additional_nic_config, }; -static struct iwl_nic_ops iwl6050g2_nic_ops = { - .additional_nic_config = &iwl6050g2_additional_nic_config, +static struct iwl_nic_ops iwl6150_nic_ops = { + .additional_nic_config = &iwl6150_additional_nic_config, }; static const struct iwl_ops iwl6000_ops = { @@ -451,17 +447,17 @@ static const struct iwl_ops iwl6050_ops = { .ieee80211_ops = &iwlagn_hw_ops, }; -static const struct iwl_ops iwl6050g2_ops = { +static const struct iwl_ops iwl6150_ops = { .lib = &iwl6000_lib, .hcmd = &iwlagn_hcmd, .utils = &iwlagn_hcmd_utils, .led = &iwlagn_led_ops, - .nic = &iwl6050g2_nic_ops, + .nic = &iwl6150_nic_ops, .ieee80211_ops = &iwlagn_hw_ops, }; -static const struct iwl_ops iwl6000g2b_ops = { - .lib = &iwl6000g2b_lib, +static const struct iwl_ops iwl6030_ops = { + .lib = &iwl6030_lib, .hcmd = &iwlagn_bt_hcmd, .utils = &iwlagn_hcmd_utils, .led = &iwlagn_led_ops, @@ -479,7 +475,6 @@ static struct iwl_base_params iwl6000_base_params = { .shadow_ram_support = true, .led_compensation = 51, .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, - .supports_idle = true, .adv_thermal_throttle = true, .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, @@ -503,7 +498,6 @@ static struct iwl_base_params 
iwl6050_base_params = { .shadow_ram_support = true, .led_compensation = 51, .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, - .supports_idle = true, .adv_thermal_throttle = true, .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, @@ -526,7 +520,6 @@ static struct iwl_base_params iwl6000_g2_base_params = { .shadow_ram_support = true, .led_compensation = 57, .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, - .supports_idle = true, .adv_thermal_throttle = true, .support_ct_kill_exit = true, .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, @@ -555,11 +548,11 @@ static struct iwl_bt_params iwl6000_bt_params = { }; #define IWL_DEVICE_6005 \ - .fw_name_pre = IWL6000G2A_FW_PRE, \ + .fw_name_pre = IWL6005_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ - .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \ - .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \ + .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ + .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ .ops = &iwl6000_ops, \ .mod_params = &iwlagn_mod_params, \ .base_params = &iwl6000_g2_base_params, \ @@ -584,12 +577,12 @@ struct iwl_cfg iwl6005_2bg_cfg = { }; #define IWL_DEVICE_6030 \ - .fw_name_pre = IWL6000G2B_FW_PRE, \ + .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ - .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \ - .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \ - .ops = &iwl6000g2b_ops, \ + .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ + .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ + .ops = &iwl6030_ops, \ .mod_params = &iwlagn_mod_params, \ .base_params = &iwl6000_g2_base_params, \ .bt_params = &iwl6000_bt_params, \ @@ -708,9 +701,9 @@ struct iwl_cfg iwl6150_bgn_cfg = { .fw_name_pre = IWL6050_FW_PRE, .ucode_api_max = IWL6050_UCODE_API_MAX, .ucode_api_min = IWL6050_UCODE_API_MIN, - .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION, - .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION, - .ops = &iwl6050g2_ops, + .eeprom_ver = EEPROM_6150_EEPROM_VERSION, + .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, + .ops = &iwl6150_ops, .mod_params = &iwlagn_mod_params, .base_params = &iwl6050_base_params, .ht_params = &iwl6000_ht_params, @@ -736,5 +729,5 @@ struct iwl_cfg iwl6000_3agn_cfg = { MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c index d16bb5ede014..9006293e740c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c @@ -631,8 +631,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp) } spin_lock_irqsave(&priv->lock, flags); - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { + if (iwl_bt_statistics(priv)) { rx_info = &(((struct iwl_bt_notif_statistics *)resp)-> rx.general.common); ofdm = &(((struct iwl_bt_notif_statistics *)resp)->rx.ofdm); @@ -897,8 +896,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) } spin_lock_irqsave(&priv->lock, flags); - if (priv->cfg->bt_params && - 
priv->cfg->bt_params->bt_statistics) { + if (iwl_bt_statistics(priv)) { rx_info = &(((struct iwl_bt_notif_statistics *)stat_resp)-> rx.general.common); } else { @@ -913,8 +911,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp) rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK); rxon_chnum = le16_to_cpu(ctx->staging.channel); - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { + if (iwl_bt_statistics(priv)) { stat_band24 = !!(((struct iwl_bt_notif_statistics *) stat_resp)->flag & STATISTICS_REPLY_FLG_BAND_24G_MSK); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c index a6dbd8983dac..b500aaae53ec 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c @@ -39,8 +39,7 @@ static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz) int p = 0; u32 flag; - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) + if (iwl_bt_statistics(priv)) flag = le32_to_cpu(priv->_agn.statistics_bt.flag); else flag = le32_to_cpu(priv->_agn.statistics.flag); @@ -89,8 +88,7 @@ ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf, * the last statistics notification from uCode * might not reflect the current uCode activity */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { + if (iwl_bt_statistics(priv)) { ofdm = &priv->_agn.statistics_bt.rx.ofdm; cck = &priv->_agn.statistics_bt.rx.cck; general = &priv->_agn.statistics_bt.rx.general.common; @@ -536,8 +534,7 @@ ssize_t iwl_ucode_tx_stats_read(struct file *file, * the last statistics notification from uCode * might not reflect the current uCode activity */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { + if (iwl_bt_statistics(priv)) { tx = &priv->_agn.statistics_bt.tx; accum_tx = &priv->_agn.accum_statistics_bt.tx; delta_tx = &priv->_agn.delta_statistics_bt.tx; @@ -737,8 +734,7 @@ ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf, * the last statistics notification from uCode * might not reflect the current uCode activity */ - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { + if (iwl_bt_statistics(priv)) { general = &priv->_agn.statistics_bt.general.common; dbg = &priv->_agn.statistics_bt.general.common.dbg; div = &priv->_agn.statistics_bt.general.common.div; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c index 366340f3fb0f..41543ad4cb84 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c @@ -305,7 +305,11 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv) cmd.slots[0].type = 0; /* BSS */ cmd.slots[1].type = 1; /* PAN */ - if (ctx_bss->vif && ctx_pan->vif) { + if (priv->_agn.hw_roc_channel) { + /* both contexts must be used for this to happen */ + slot1 = priv->_agn.hw_roc_duration; + slot0 = IWL_MIN_SLOT_TIME; + } else if (ctx_bss->vif && ctx_pan->vif) { int bcnint = ctx_pan->vif->bss_conf.beacon_int; int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1; @@ -330,12 +334,12 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv) if (test_bit(STATUS_SCAN_HW, &priv->status) || (!ctx_bss->vif->bss_conf.idle && !ctx_bss->vif->bss_conf.assoc)) { - slot0 = dtim * bcnint * 3 - 20; - slot1 = 20; + slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; + slot1 = IWL_MIN_SLOT_TIME; } else if (!ctx_pan->vif->bss_conf.idle && !ctx_pan->vif->bss_conf.assoc) { - 
slot1 = bcnint * 3 - 20; - slot0 = 20; + slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME; + slot0 = IWL_MIN_SLOT_TIME; } } else if (ctx_pan->vif) { slot0 = 0; @@ -344,8 +348,8 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv) slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); if (test_bit(STATUS_SCAN_HW, &priv->status)) { - slot0 = slot1 * 3 - 20; - slot1 = 20; + slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; + slot1 = IWL_MIN_SLOT_TIME; } } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c index 1a24946bc203..c1190d965614 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c @@ -63,23 +63,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) } /* Set led register off */ -static int iwl_led_on_reg(struct iwl_priv *priv) +void iwlagn_led_enable(struct iwl_priv *priv) { - IWL_DEBUG_LED(priv, "led on\n"); iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); - return 0; -} - -/* Set led register off */ -static int iwl_led_off_reg(struct iwl_priv *priv) -{ - IWL_DEBUG_LED(priv, "LED Reg off\n"); - iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF); - return 0; } const struct iwl_led_ops iwlagn_led_ops = { .cmd = iwl_send_led_cmd, - .on = iwl_led_on_reg, - .off = iwl_led_off_reg, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h index a594e4fdc6b8..96f323dc5dd6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h @@ -28,5 +28,6 @@ #define __iwl_agn_led_h__ extern const struct iwl_led_ops iwlagn_led_ops; +void iwlagn_led_enable(struct iwl_priv *priv); #endif /* __iwl_agn_led_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 3dee87e8f55d..2003c1d4295f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -473,6 +473,11 @@ void iwlagn_rx_handler_setup(struct iwl_priv *priv) priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] = iwlagn_rx_calib_complete; priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; + + /* set up notification wait support */ + spin_lock_init(&priv->_agn.notif_wait_lock); + INIT_LIST_HEAD(&priv->_agn.notif_waits); + init_waitqueue_head(&priv->_agn.notif_waitq); } void iwlagn_setup_deferred_work(struct iwl_priv *priv) @@ -528,9 +533,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv) void iwlagn_temperature(struct iwl_priv *priv) { - /* store temperature from statistics (in Celsius) */ - priv->temperature = - le32_to_cpu(priv->_agn.statistics.general.common.temperature); + /* store temperature from correct statistics (in Celsius) */ + priv->temperature = le32_to_cpu((iwl_bt_statistics(priv)) ? 
+ priv->_agn.statistics_bt.general.common.temperature : + priv->_agn.statistics.general.common.temperature); iwl_tt_handler(priv); } @@ -604,6 +610,7 @@ const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv, struct iwl_mod_params iwlagn_mod_params = { .amsdu_size_8K = 1, .restart_fw = 1, + .plcp_check = true, /* the rest are 0 by default */ }; @@ -988,240 +995,6 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) return -1; } -/* Calc max signal level (dBm) among 3 possible receivers */ -static inline int iwlagn_calc_rssi(struct iwl_priv *priv, - struct iwl_rx_phy_res *rx_resp) -{ - return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); -} - -static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) -{ - u32 decrypt_out = 0; - - if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == - RX_RES_STATUS_STATION_FOUND) - decrypt_out |= (RX_RES_STATUS_STATION_FOUND | - RX_RES_STATUS_NO_STATION_INFO_MISMATCH); - - decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); - - /* packet was not encrypted */ - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == - RX_RES_STATUS_SEC_TYPE_NONE) - return decrypt_out; - - /* packet was encrypted with unknown alg */ - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == - RX_RES_STATUS_SEC_TYPE_ERR) - return decrypt_out; - - /* decryption was not done in HW */ - if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != - RX_MPDU_RES_STATUS_DEC_DONE_MSK) - return decrypt_out; - - switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { - - case RX_RES_STATUS_SEC_TYPE_CCMP: - /* alg is CCM: check MIC only */ - if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) - /* Bad MIC */ - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; - else - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; - - break; - - case RX_RES_STATUS_SEC_TYPE_TKIP: - if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { - /* Bad TTAK */ - decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; - break; - } - /* fall through if TTAK OK */ - default: - if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; - else - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; - break; - } - - IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", - decrypt_in, decrypt_out); - - return decrypt_out; -} - -static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - u16 len, - u32 ampdu_status, - struct iwl_rx_mem_buffer *rxb, - struct ieee80211_rx_status *stats) -{ - struct sk_buff *skb; - __le16 fc = hdr->frame_control; - - /* We only process data packets if the interface is open */ - if (unlikely(!priv->is_open)) { - IWL_DEBUG_DROP_LIMIT(priv, - "Dropping packet while interface is not open.\n"); - return; - } - - /* In case of HW accelerated crypto and bad decryption, drop */ - if (!priv->cfg->mod_params->sw_crypto && - iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) - return; - - skb = dev_alloc_skb(128); - if (!skb) { - IWL_ERR(priv, "dev_alloc_skb failed\n"); - return; - } - - skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); - - iwl_update_stats(priv, false, fc, len); - memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); - - ieee80211_rx(priv->hw, skb); - priv->alloc_rxb_page--; - rxb->page = NULL; -} - -/* Called for REPLY_RX (legacy ABG frames), or - * REPLY_RX_MPDU_CMD (HT high-throughput N frames). 
*/ -void iwlagn_rx_reply_rx(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct ieee80211_hdr *header; - struct ieee80211_rx_status rx_status; - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_rx_phy_res *phy_res; - __le32 rx_pkt_status; - struct iwl_rx_mpdu_res_start *amsdu; - u32 len; - u32 ampdu_status; - u32 rate_n_flags; - - /** - * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. - * REPLY_RX: physical layer info is in this buffer - * REPLY_RX_MPDU_CMD: physical layer info was sent in separate - * command and cached in priv->last_phy_res - * - * Here we set up local variables depending on which command is - * received. - */ - if (pkt->hdr.cmd == REPLY_RX) { - phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; - header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) - + phy_res->cfg_phy_cnt); - - len = le16_to_cpu(phy_res->byte_count); - rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + - phy_res->cfg_phy_cnt + len); - ampdu_status = le32_to_cpu(rx_pkt_status); - } else { - if (!priv->_agn.last_phy_res_valid) { - IWL_ERR(priv, "MPDU frame without cached PHY data\n"); - return; - } - phy_res = &priv->_agn.last_phy_res; - amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; - header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); - len = le16_to_cpu(amsdu->byte_count); - rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); - ampdu_status = iwlagn_translate_rx_status(priv, - le32_to_cpu(rx_pkt_status)); - } - - if ((unlikely(phy_res->cfg_phy_cnt > 20))) { - IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", - phy_res->cfg_phy_cnt); - return; - } - - if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || - !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { - IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", - le32_to_cpu(rx_pkt_status)); - return; - } - - /* This will be used in several places later */ - rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); - - /* rx_status carries information about the packet to mac80211 */ - rx_status.mactime = le64_to_cpu(phy_res->timestamp); - rx_status.freq = - ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel)); - rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; - rx_status.rate_idx = - iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); - rx_status.flag = 0; - - /* TSF isn't reliable. In order to allow smooth user experience, - * this W/A doesn't propagate it to the mac80211 */ - /*rx_status.flag |= RX_FLAG_TSFT;*/ - - priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); - - /* Find max signal strength (dBm) among 3 antenna/receiver chains */ - rx_status.signal = iwlagn_calc_rssi(priv, phy_res); - - iwl_dbg_log_rx_data_frame(priv, len, header); - IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", - rx_status.signal, (unsigned long long)rx_status.mactime); - - /* - * "antenna number" - * - * It seems that the antenna field in the phy flags value - * is actually a bit field. This is undefined by radiotap, - * it wants an actual antenna number but I always get "7" - * for most legacy frames I receive indicating that the - * same frame was received on all three RX chains. - * - * I think this field should be removed in favor of a - * new 802.11n radiotap field "RX chains" that is defined - * as a bitmask. 
- */ - rx_status.antenna = - (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) - >> RX_RES_PHY_FLAGS_ANTENNA_POS; - - /* set the preamble flag if appropriate */ - if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) - rx_status.flag |= RX_FLAG_SHORTPRE; - - /* Set up the HT phy flags */ - if (rate_n_flags & RATE_MCS_HT_MSK) - rx_status.flag |= RX_FLAG_HT; - if (rate_n_flags & RATE_MCS_HT40_MSK) - rx_status.flag |= RX_FLAG_40MHZ; - if (rate_n_flags & RATE_MCS_SGI_MSK) - rx_status.flag |= RX_FLAG_SHORT_GI; - - iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, - rxb, &rx_status); -} - -/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). - * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ -void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - priv->_agn.last_phy_res_valid = true; - memcpy(&priv->_agn.last_phy_res, pkt->u.raw, - sizeof(struct iwl_rx_phy_res)); -} - static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, struct ieee80211_vif *vif, enum ieee80211_band band, @@ -1342,6 +1115,18 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv, return added; } +static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen) +{ + struct sk_buff *skb = priv->_agn.offchan_tx_skb; + + if (skb->len < maxlen) + maxlen = skb->len; + + memcpy(data, skb->data, maxlen); + + return maxlen; +} + int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) { struct iwl_host_cmd cmd = { @@ -1384,20 +1169,25 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; scan->quiet_time = IWL_ACTIVE_QUIET_TIME; - if (iwl_is_any_associated(priv)) { + if (priv->scan_type != IWL_SCAN_OFFCH_TX && + iwl_is_any_associated(priv)) { u16 interval = 0; u32 extra; u32 suspend_time = 100; u32 scan_suspend_time = 100; - unsigned long flags; IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); - spin_lock_irqsave(&priv->lock, flags); - if (priv->is_internal_short_scan) + switch (priv->scan_type) { + case IWL_SCAN_OFFCH_TX: + WARN_ON(1); + break; + case IWL_SCAN_RADIO_RESET: interval = 0; - else + break; + case IWL_SCAN_NORMAL: interval = vif->bss_conf.beacon_int; - spin_unlock_irqrestore(&priv->lock, flags); + break; + } scan->suspend_time = 0; scan->max_out_time = cpu_to_le32(200 * 1024); @@ -1410,29 +1200,41 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) scan->suspend_time = cpu_to_le32(scan_suspend_time); IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", scan_suspend_time, interval); + } else if (priv->scan_type == IWL_SCAN_OFFCH_TX) { + scan->suspend_time = 0; + scan->max_out_time = + cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout); } - if (priv->is_internal_short_scan) { + switch (priv->scan_type) { + case IWL_SCAN_RADIO_RESET: IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); - } else if (priv->scan_request->n_ssids) { - int i, p = 0; - IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); - for (i = 0; i < priv->scan_request->n_ssids; i++) { - /* always does wildcard anyway */ - if (!priv->scan_request->ssids[i].ssid_len) - continue; - scan->direct_scan[p].id = WLAN_EID_SSID; - scan->direct_scan[p].len = - priv->scan_request->ssids[i].ssid_len; - memcpy(scan->direct_scan[p].ssid, - priv->scan_request->ssids[i].ssid, - priv->scan_request->ssids[i].ssid_len); - n_probes++; - p++; - } - is_active = 
true; - } else - IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); + break; + case IWL_SCAN_NORMAL: + if (priv->scan_request->n_ssids) { + int i, p = 0; + IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); + for (i = 0; i < priv->scan_request->n_ssids; i++) { + /* always does wildcard anyway */ + if (!priv->scan_request->ssids[i].ssid_len) + continue; + scan->direct_scan[p].id = WLAN_EID_SSID; + scan->direct_scan[p].len = + priv->scan_request->ssids[i].ssid_len; + memcpy(scan->direct_scan[p].ssid, + priv->scan_request->ssids[i].ssid, + priv->scan_request->ssids[i].ssid_len); + n_probes++; + p++; + } + is_active = true; + } else + IWL_DEBUG_SCAN(priv, "Start passive scan.\n"); + break; + case IWL_SCAN_OFFCH_TX: + IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n"); + break; + } scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; scan->tx_cmd.sta_id = ctx->bcast_sta_id; @@ -1530,38 +1332,77 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; scan->rx_chain = cpu_to_le16(rx_chain); - if (!priv->is_internal_short_scan) { + switch (priv->scan_type) { + case IWL_SCAN_NORMAL: cmd_len = iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, vif->addr, priv->scan_request->ie, priv->scan_request->ie_len, IWL_MAX_SCAN_SIZE - sizeof(*scan)); - } else { + break; + case IWL_SCAN_RADIO_RESET: /* use bcast addr, will not be transmitted but must be valid */ cmd_len = iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, iwl_bcast_addr, NULL, 0, IWL_MAX_SCAN_SIZE - sizeof(*scan)); - + break; + case IWL_SCAN_OFFCH_TX: + cmd_len = iwl_fill_offch_tx(priv, scan->data, + IWL_MAX_SCAN_SIZE + - sizeof(*scan) + - sizeof(struct iwl_scan_channel)); + scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX; + break; + default: + BUG(); } scan->tx_cmd.len = cpu_to_le16(cmd_len); scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK); - if (priv->is_internal_short_scan) { + switch (priv->scan_type) { + case IWL_SCAN_RADIO_RESET: scan->channel_count = iwl_get_single_channel_for_scan(priv, vif, band, - (void *)&scan->data[le16_to_cpu( - scan->tx_cmd.len)]); - } else { + (void *)&scan->data[cmd_len]); + break; + case IWL_SCAN_NORMAL: scan->channel_count = iwl_get_channels_for_scan(priv, vif, band, is_active, n_probes, - (void *)&scan->data[le16_to_cpu( - scan->tx_cmd.len)]); + (void *)&scan->data[cmd_len]); + break; + case IWL_SCAN_OFFCH_TX: { + struct iwl_scan_channel *scan_ch; + + scan->channel_count = 1; + + scan_ch = (void *)&scan->data[cmd_len]; + scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE; + scan_ch->channel = + cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value); + scan_ch->active_dwell = + cpu_to_le16(priv->_agn.offchan_tx_timeout); + scan_ch->passive_dwell = 0; + + /* Set txpower levels to defaults */ + scan_ch->dsp_atten = 110; + + /* NOTE: if we were doing 6Mb OFDM for scans we'd use + * power level: + * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3; + */ + if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ) + scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3; + else + scan_ch->tx_gain = ((1 << 5) | (5 << 3)); + } + break; } + if (scan->channel_count == 0) { IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); return -EIO; @@ -1801,26 +1642,39 @@ static const __le32 iwlagn_concurrent_lookup[12] = { void iwlagn_send_advance_bt_config(struct iwl_priv *priv) { - struct iwlagn_bt_cmd bt_cmd = { + struct iwl_basic_bt_cmd basic = { .max_kill = 
IWLAGN_BT_MAX_KILL_DEFAULT, .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT, .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT, .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT, }; + struct iwl6000_bt_cmd bt_cmd_6000; + struct iwl2000_bt_cmd bt_cmd_2000; + int ret; BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != - sizeof(bt_cmd.bt3_lookup_table)); - - if (priv->cfg->bt_params) - bt_cmd.prio_boost = priv->cfg->bt_params->bt_prio_boost; - else - bt_cmd.prio_boost = 0; - bt_cmd.kill_ack_mask = priv->kill_ack_mask; - bt_cmd.kill_cts_mask = priv->kill_cts_mask; + sizeof(basic.bt3_lookup_table)); + + if (priv->cfg->bt_params) { + if (priv->cfg->bt_params->bt_session_2) { + bt_cmd_2000.prio_boost = cpu_to_le32( + priv->cfg->bt_params->bt_prio_boost); + bt_cmd_2000.tx_prio_boost = 0; + bt_cmd_2000.rx_prio_boost = 0; + } else { + bt_cmd_6000.prio_boost = + priv->cfg->bt_params->bt_prio_boost; + bt_cmd_6000.tx_prio_boost = 0; + bt_cmd_6000.rx_prio_boost = 0; + } + } else { + IWL_ERR(priv, "failed to construct BT Coex Config\n"); + return; + } - bt_cmd.valid = priv->bt_valid; - bt_cmd.tx_prio_boost = 0; - bt_cmd.rx_prio_boost = 0; + basic.kill_ack_mask = priv->kill_ack_mask; + basic.kill_cts_mask = priv->kill_cts_mask; + basic.valid = priv->bt_valid; /* * Configure BT coex mode to "no coexistence" when the @@ -1829,49 +1683,45 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv) * IBSS mode (no proper uCode support for coex then). */ if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) { - bt_cmd.flags = 0; + basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED; } else { - bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << + basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << IWLAGN_BT_FLAG_COEX_MODE_SHIFT; if (priv->cfg->bt_params && priv->cfg->bt_params->bt_sco_disable) - bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; + basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE; if (priv->bt_ch_announce) - bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; - IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags); + basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; + IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags); } - priv->bt_enable_flag = bt_cmd.flags; + priv->bt_enable_flag = basic.flags; if (priv->bt_full_concurrent) - memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup, + memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup, sizeof(iwlagn_concurrent_lookup)); else - memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup, + memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup, sizeof(iwlagn_def_3w_lookup)); IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n", - bt_cmd.flags ? "active" : "disabled", + basic.flags ? "active" : "disabled", priv->bt_full_concurrent ? "full concurrency" : "3-wire"); - if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd)) + if (priv->cfg->bt_params->bt_session_2) { + memcpy(&bt_cmd_2000.basic, &basic, + sizeof(basic)); + ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(bt_cmd_2000), &bt_cmd_2000); + } else { + memcpy(&bt_cmd_6000.basic, &basic, + sizeof(basic)); + ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, + sizeof(bt_cmd_6000), &bt_cmd_6000); + } + if (ret) IWL_ERR(priv, "failed to send BT Coex Config\n"); - /* - * When we are doing a restart, need to also reconfigure BT - * SCO to the device. If not doing a restart, bt_sco_active - * will always be false, so there's no need to have an extra - * variable to check for it. 
- */ - if (priv->bt_sco_active) { - struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 }; - - if (priv->bt_sco_active) - sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE; - if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO, - sizeof(sco_cmd), &sco_cmd)) - IWL_ERR(priv, "failed to send BT SCO command\n"); - } } static void iwlagn_bt_traffic_change_work(struct work_struct *work) @@ -1881,6 +1731,11 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work) struct iwl_rxon_context *ctx; int smps_request = -1; + if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) { + /* bt coex disabled */ + return; + } + /* * Note: bt_traffic_load can be overridden by scan complete and * coex profile notifications. Ignore that since only bad consequence @@ -1991,12 +1846,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv, (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> BT_UART_MSG_FRAME6DISCOVERABLE_POS); - IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = " - "0x%X, Connectable = 0x%X", + IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = " + "0x%X, Inquiry = 0x%X, Connectable = 0x%X", (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, - (BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >> - BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS, + (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >> + BT_UART_MSG_FRAME7PAGE_POS, + (BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >> + BT_UART_MSG_FRAME7INQUIRY_POS, (BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >> BT_UART_MSG_FRAME7CONNECTABLE_POS); } @@ -2032,9 +1889,13 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, unsigned long flags; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif; - struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 }; struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg; + if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) { + /* bt coex disabled */ + return; + } + IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n"); IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status); IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load); @@ -2063,15 +1924,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, queue_work(priv->workqueue, &priv->bt_traffic_change_work); } - if (priv->bt_sco_active != - (uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) { - priv->bt_sco_active = uart_msg->frame3 & - BT_UART_MSG_FRAME3SCOESCO_MSK; - if (priv->bt_sco_active) - sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE; - iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO, - sizeof(sco_cmd), &sco_cmd, NULL); - } } iwlagn_set_kill_msk(priv, uart_msg); @@ -2389,3 +2241,44 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display) } return 0; } + +/* notification wait support */ +void iwlagn_init_notification_wait(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry, + void (*fn)(struct iwl_priv *priv, + struct iwl_rx_packet *pkt), + u8 cmd) +{ + wait_entry->fn = fn; + wait_entry->cmd = cmd; + wait_entry->triggered = false; + + spin_lock_bh(&priv->_agn.notif_wait_lock); + list_add(&wait_entry->list, &priv->_agn.notif_waits); + spin_unlock_bh(&priv->_agn.notif_wait_lock); +} + +signed long iwlagn_wait_notification(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry, + unsigned long timeout) +{ + int ret; + + ret = wait_event_timeout(priv->_agn.notif_waitq, + &wait_entry->triggered, + timeout); + + spin_lock_bh(&priv->_agn.notif_wait_lock); 
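+	/* always unlink the entry, whether or not the notification arrived */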
+ list_del(&wait_entry->list); + spin_unlock_bh(&priv->_agn.notif_wait_lock); + + return ret; +} + +void iwlagn_remove_notification(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry) +{ + spin_lock_bh(&priv->_agn.notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(&priv->_agn.notif_wait_lock); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 75fcd30a7c13..d03b4734c892 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -179,31 +179,31 @@ static s32 expected_tpt_legacy[IWL_RATE_COUNT] = { }; static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */ - {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */ - {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */ - {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */ + {0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */ + {0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */ + {0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */ + {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */ }; static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */ - {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */ - {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */ + {0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */ + {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */ }; static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { - {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */ - {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */ - {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */ - {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/ + {0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */ + {0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */ + {0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */ + {0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/ }; static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */ - {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */ - {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */ + {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */ + {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */ }; static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = { @@ -2890,6 +2890,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, u8 ant_toggle_cnt = 0; u8 use_ht_possible = 1; u8 valid_tx_ant = 0; + struct iwl_station_priv *sta_priv = + container_of(lq_sta, struct iwl_station_priv, lq_sta); struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq; /* Override starting rate (index 0) if needed for debug purposes */ @@ -3008,7 +3010,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv, repeat_rate--; } - lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + lq_cmd->agg_params.agg_frame_cnt_limit = + sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF; lq_cmd->agg_params.agg_dis_start_th = 
LINK_QUAL_AGG_DISABLE_START_DEF; lq_cmd->agg_params.agg_time_limit = diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h index 75e50d33ecb3..184828c72b31 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h @@ -213,6 +213,7 @@ enum { IWL_CCK_BASIC_RATES_MASK) #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) +#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1) #define IWL_INVALID_VALUE -1 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 6d140bd53291..dfdbea6e8f99 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -52,10 +52,14 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_rxon_cmd *send) { + struct iwl_notification_wait disable_wait; __le32 old_filter = send->filter_flags; u8 old_dev_type = send->dev_type; int ret; + iwlagn_init_notification_wait(priv, &disable_wait, NULL, + REPLY_WIPAN_DEACTIVATION_COMPLETE); + send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; send->dev_type = RXON_DEV_TYPE_P2P; ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send); @@ -63,11 +67,18 @@ static int iwlagn_disable_pan(struct iwl_priv *priv, send->filter_flags = old_filter; send->dev_type = old_dev_type; - if (ret) + if (ret) { IWL_ERR(priv, "Error disabling PAN (%d)\n", ret); - - /* FIXME: WAIT FOR PAN DISABLE */ - msleep(300); + iwlagn_remove_notification(priv, &disable_wait); + } else { + signed long wait_res; + + wait_res = iwlagn_wait_notification(priv, &disable_wait, HZ); + if (wait_res == 0) { + IWL_ERR(priv, "Timed out waiting for PAN disable\n"); + ret = -EIO; + } + } return ret; } @@ -145,6 +156,23 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* always get timestamp with Rx frame */ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; + if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) { + struct ieee80211_channel *chan = priv->_agn.hw_roc_channel; + + iwl_set_rxon_channel(priv, chan, ctx); + iwl_set_flags_for_band(priv, ctx, chan->band, NULL); + ctx->staging.filter_flags |= + RXON_FILTER_ASSOC_MSK | + RXON_FILTER_PROMISC_MSK | + RXON_FILTER_CTL2HOST_MSK; + ctx->staging.dev_type = RXON_DEV_TYPE_P2P; + new_assoc = true; + + if (memcmp(&ctx->staging, &ctx->active, + sizeof(ctx->staging)) == 0) + return 0; + } + if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; @@ -288,10 +316,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) * If we issue a new RXON command which required a tune then we must * send a new TXPOWER command or we won't be able to Tx any frames. * - * FIXME: which RXON requires a tune? Can we optimise this out in - * some cases? + * It's expected we set power here if channel is changing. 
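+	 * (tx_power_next holds the TX power level that has been requested
+	 * but not necessarily sent to the device yet.)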
*/ - ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); + ret = iwl_set_tx_power(priv, priv->tx_power_next, true); if (ret) { IWL_ERR(priv, "Error sending TX power (%d)\n", ret); return ret; @@ -444,6 +471,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv, struct iwl_rxon_context *tmp; struct ieee80211_sta *sta; struct iwl_ht_config *ht_conf = &priv->current_ht_config; + struct ieee80211_sta_ht_cap *ht_cap; bool need_multiple; lockdep_assert_held(&priv->mutex); @@ -452,23 +480,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv, case NL80211_IFTYPE_STATION: rcu_read_lock(); sta = ieee80211_find_sta(vif, bss_conf->bssid); - if (sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - int maxstreams; - - maxstreams = (ht_cap->mcs.tx_params & - IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) - >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; - maxstreams += 1; - - need_multiple = true; - - if ((ht_cap->mcs.rx_mask[1] == 0) && - (ht_cap->mcs.rx_mask[2] == 0)) - need_multiple = false; - if (maxstreams <= 1) - need_multiple = false; - } else { + if (!sta) { /* * If at all, this can only happen through a race * when the AP disconnects us while we're still @@ -476,7 +488,46 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv, * will soon tell us about that. */ need_multiple = false; + rcu_read_unlock(); + break; + } + + ht_cap = &sta->ht_cap; + + need_multiple = true; + + /* + * If the peer advertises no support for receiving 2 and 3 + * stream MCS rates, it can't be transmitting them either. + */ + if (ht_cap->mcs.rx_mask[1] == 0 && + ht_cap->mcs.rx_mask[2] == 0) { + need_multiple = false; + } else if (!(ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_DEFINED)) { + /* If it can't TX MCS at all ... */ + need_multiple = false; + } else if (ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_RX_DIFF) { + int maxstreams; + + /* + * But if it can receive them, it might still not + * be able to transmit them, which is what we need + * to check here -- so check the number of streams + * it advertises for TX (if different from RX). + */ + + maxstreams = (ht_cap->mcs.tx_params & + IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK); + maxstreams >>= + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; + maxstreams += 1; + + if (maxstreams <= 1) + need_multiple = false; } + rcu_read_unlock(); break; case NL80211_IFTYPE_ADHOC: @@ -546,12 +597,10 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, if (changes & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { - iwl_led_associate(priv); priv->timestamp = bss_conf->timestamp; ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; } else { ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwl_led_disassociate(priv); } } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 24a11b8f73bc..a709d05c5868 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -539,7 +539,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) unsigned long flags; bool is_agg = false; - if (info->control.vif) + /* + * If the frame needs to go out off-channel, then + * we'll have put the PAN context to that channel, + * so make the frame go out there. 
+ */ + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) + ctx = &priv->contexts[IWL_RXON_CTX_PAN]; + else if (info->control.vif) ctx = iwl_rxon_ctx_from_vif(info->control.vif); spin_lock_irqsave(&priv->lock, flags); @@ -940,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv) */ void iwlagn_txq_ctx_stop(struct iwl_priv *priv) { - int ch; + int ch, txq_id; unsigned long flags; /* Turn off all Tx DMA fifos */ @@ -959,6 +966,16 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv) iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); } spin_unlock_irqrestore(&priv->lock, flags); + + if (!priv->txq) + return; + + /* Unmap DMA from host system and free skb's */ + for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) + if (txq_id == priv->cmd_queue) + iwl_cmd_queue_unmap(priv); + else + iwl_tx_queue_unmap(priv, txq_id); } /* diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index 24dabcd2a36c..d807e5e2b718 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c @@ -308,14 +308,6 @@ void iwlagn_init_alive_start(struct iwl_priv *priv) { int ret = 0; - /* Check alive response for "valid" sign from uCode */ - if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); - goto restart; - } - /* initialize uCode was loaded... verify inst image. * This is a paranoid check, because we would not have gotten the * "initialize" alive if code weren't properly loaded. */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index c1cfd9952e52..581dc9f10273 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -59,6 +59,7 @@ #include "iwl-sta.h" #include "iwl-agn-calib.h" #include "iwl-agn.h" +#include "iwl-agn-led.h" /****************************************************************************** @@ -85,7 +86,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); MODULE_LICENSE("GPL"); -MODULE_ALIAS("iwl4965"); static int iwlagn_ant_coupling; static bool iwlagn_bt_ch_announce = 1; @@ -424,47 +424,6 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv, return 0; } -/****************************************************************************** - * - * Generic RX handler implementations - * - ******************************************************************************/ -static void iwl_rx_reply_alive(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_alive_resp *palive; - struct delayed_work *pwork; - - palive = &pkt->u.alive_frame; - - IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " - "0x%01X 0x%01X\n", - palive->is_valid, palive->ver_type, - palive->ver_subtype); - - if (palive->ver_subtype == INITIALIZE_SUBTYPE) { - IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); - memcpy(&priv->card_alive_init, - &pkt->u.alive_frame, - sizeof(struct iwl_init_alive_resp)); - pwork = &priv->init_alive_start; - } else { - IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); - memcpy(&priv->card_alive, &pkt->u.alive_frame, - sizeof(struct iwl_alive_resp)); - pwork = &priv->alive_start; - } - - /* We delay the ALIVE response by 5ms to - * give the HW RF Kill time to activate... 
*/ - if (palive->is_valid == UCODE_VALID_OK) - queue_delayed_work(priv->workqueue, pwork, - msecs_to_jiffies(5)); - else - IWL_WARN(priv, "uCode did not respond OK.\n"); -} - static void iwl_bg_beacon_update(struct work_struct *work) { struct iwl_priv *priv = @@ -699,83 +658,6 @@ static void iwl_bg_ucode_trace(unsigned long data) } } -static void iwl_rx_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl4965_beacon_notif *beacon = - (struct iwl4965_beacon_notif *)pkt->u.raw; -#ifdef CONFIG_IWLWIFI_DEBUG - u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); - - IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " - "tsf %d %d rate %d\n", - le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK, - beacon->beacon_notify_hdr.failure_frame, - le32_to_cpu(beacon->ibss_mgr_status), - le32_to_cpu(beacon->high_tsf), - le32_to_cpu(beacon->low_tsf), rate); -#endif - - priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); - - if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) - queue_work(priv->workqueue, &priv->beacon_update); -} - -/* Handle notification from uCode that card's power state is changing - * due to software, hardware, or critical temperature RFKILL */ -static void iwl_rx_card_state_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); - unsigned long status = priv->status; - - IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", - (flags & HW_CARD_DISABLED) ? "Kill" : "On", - (flags & SW_CARD_DISABLED) ? "Kill" : "On", - (flags & CT_CARD_DISABLED) ? - "Reached" : "Not reached"); - - if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | - CT_CARD_DISABLED)) { - - iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - - iwl_write_direct32(priv, HBUS_TARG_MBX_C, - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); - - if (!(flags & RXON_CARD_DISABLED)) { - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); - iwl_write_direct32(priv, HBUS_TARG_MBX_C, - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); - } - if (flags & CT_CARD_DISABLED) - iwl_tt_enter_ct_kill(priv); - } - if (!(flags & CT_CARD_DISABLED)) - iwl_tt_exit_ct_kill(priv); - - if (flags & HW_CARD_DISABLED) - set_bit(STATUS_RF_KILL_HW, &priv->status); - else - clear_bit(STATUS_RF_KILL_HW, &priv->status); - - - if (!(flags & RXON_CARD_DISABLED)) - iwl_scan_cancel(priv); - - if ((test_bit(STATUS_RF_KILL_HW, &status) != - test_bit(STATUS_RF_KILL_HW, &priv->status))) - wiphy_rfkill_set_hw_state(priv->hw->wiphy, - test_bit(STATUS_RF_KILL_HW, &priv->status)); - else - wake_up_interruptible(&priv->wait_command_queue); -} - static void iwl_bg_tx_flush(struct work_struct *work) { struct iwl_priv *priv = @@ -795,58 +677,13 @@ static void iwl_bg_tx_flush(struct work_struct *work) } /** - * iwl_setup_rx_handlers - Initialize Rx handler callbacks - * - * Setup the RX handlers for each of the reply types sent from the uCode - * to the host. - * - * This function chains into the hardware specific files for them to setup - * any hardware specific handlers as well. 
- */ -static void iwl_setup_rx_handlers(struct iwl_priv *priv) -{ - priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; - priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; - priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; - priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = - iwl_rx_spectrum_measure_notif; - priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; - priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = - iwl_rx_pm_debug_statistics_notif; - priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; - - /* - * The same handler is used for both the REPLY to a discrete - * statistics request from the host as well as for the periodic - * statistics notifications (after received beacons) from the uCode. - */ - priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics; - priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; - - iwl_setup_rx_scan_handlers(priv); - - /* status change handler */ - priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; - - priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = - iwl_rx_missed_beacon_notif; - /* Rx handlers */ - priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy; - priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx; - /* block ack */ - priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; - /* Set up hardware specific Rx handlers */ - priv->cfg->ops->lib->rx_handler_setup(priv); -} - -/** * iwl_rx_handle - Main entry function for receiving responses from uCode * * Uses the priv->rx_handlers callback function array to invoke * the appropriate handlers, including command responses, * frame-received notifications, and other notifications. */ -void iwl_rx_handle(struct iwl_priv *priv) +static void iwl_rx_handle(struct iwl_priv *priv) { struct iwl_rx_mem_buffer *rxb; struct iwl_rx_packet *pkt; @@ -910,6 +747,27 @@ void iwl_rx_handle(struct iwl_priv *priv) (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && (pkt->hdr.cmd != REPLY_TX); + /* + * Do the notification wait before RX handlers so + * even if the RX handler consumes the RXB we have + * access to it in the notification wait entry. + */ + if (!list_empty(&priv->_agn.notif_waits)) { + struct iwl_notification_wait *w; + + spin_lock(&priv->_agn.notif_wait_lock); + list_for_each_entry(w, &priv->_agn.notif_waits, list) { + if (w->cmd == pkt->hdr.cmd) { + w->triggered = true; + if (w->fn) + w->fn(priv, pkt); + } + } + spin_unlock(&priv->_agn.notif_wait_lock); + + wake_up_all(&priv->_agn.notif_waitq); + } + /* Based on type of command response or notification, * handle those that need handling via function in * rx_handlers table. See iwl_setup_rx_handlers() */ @@ -1379,66 +1237,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) iwl_enable_rfkill_int(priv); } -/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ -#define ACK_CNT_RATIO (50) -#define BA_TIMEOUT_CNT (5) -#define BA_TIMEOUT_MAX (16) - -/** - * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. - * - * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding - * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal - * operation state. 
- */ -bool iwl_good_ack_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt) -{ - bool rc = true; - int actual_ack_cnt_delta, expected_ack_cnt_delta; - int ba_timeout_delta; - - actual_ack_cnt_delta = - le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) - - le32_to_cpu(priv->_agn.statistics.tx.actual_ack_cnt); - expected_ack_cnt_delta = - le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) - - le32_to_cpu(priv->_agn.statistics.tx.expected_ack_cnt); - ba_timeout_delta = - le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) - - le32_to_cpu(priv->_agn.statistics.tx.agg.ba_timeout); - if ((priv->_agn.agg_tids_count > 0) && - (expected_ack_cnt_delta > 0) && - (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta) - < ACK_CNT_RATIO) && - (ba_timeout_delta > BA_TIMEOUT_CNT)) { - IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d," - " expected_ack_cnt = %d\n", - actual_ack_cnt_delta, expected_ack_cnt_delta); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - /* - * This is ifdef'ed on DEBUGFS because otherwise the - * statistics aren't available. If DEBUGFS is set but - * DEBUG is not, these will just compile out. - */ - IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n", - priv->_agn.delta_statistics.tx.rx_detected_cnt); - IWL_DEBUG_RADIO(priv, - "ack_or_ba_timeout_collision delta = %d\n", - priv->_agn.delta_statistics.tx. - ack_or_ba_timeout_collision); -#endif - IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n", - ba_timeout_delta); - if (!actual_ack_cnt_delta && - (ba_timeout_delta >= BA_TIMEOUT_MAX)) - rc = false; - } - return rc; -} - - /***************************************************************************** * * sysfs attributes @@ -2632,13 +2430,6 @@ static void iwl_alive_start(struct iwl_priv *priv) IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); - if (priv->card_alive.is_valid != UCODE_VALID_OK) { - /* We had an error bringing up the hardware, so take it - * all the way back down so we can try again */ - IWL_DEBUG_INFO(priv, "Alive failed.\n"); - goto restart; - } - /* Initialize uCode has loaded Runtime uCode ... verify inst image. * This is a paranoid check, because we would not have gotten the * "runtime" alive if code weren't properly loaded. 
*/ @@ -2710,9 +2501,11 @@ static void iwl_alive_start(struct iwl_priv *priv) priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); } - if (priv->cfg->bt_params && - !priv->cfg->bt_params->advanced_bt_coexist) { - /* Configure Bluetooth device coexistence support */ + if (!priv->cfg->bt_params || (priv->cfg->bt_params && + !priv->cfg->bt_params->advanced_bt_coexist)) { + /* + * default is 2-wire BT coexexistence support + */ priv->cfg->ops->hcmd->send_bt_config(priv); } @@ -2726,8 +2519,6 @@ static void iwl_alive_start(struct iwl_priv *priv) /* At this point, the NIC is initialized and operational */ iwl_rf_kill_ct_config(priv); - iwl_leds_init(priv); - IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); wake_up_interruptible(&priv->wait_command_queue); @@ -2769,7 +2560,6 @@ static void __iwl_down(struct iwl_priv *priv) priv->cfg->bt_params->bt_init_traffic_load; else priv->bt_traffic_load = 0; - priv->bt_sco_active = false; priv->bt_full_concurrent = false; priv->bt_ci_compliance = 0; @@ -3063,8 +2853,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work) } if (priv->start_calib) { - if (priv->cfg->bt_params && - priv->cfg->bt_params->bt_statistics) { + if (iwl_bt_statistics(priv)) { iwl_chain_noise_calibration(priv, (void *)&priv->_agn.statistics_bt); iwl_sensitivity_calibration(priv, @@ -3089,7 +2878,7 @@ static void iwl_bg_restart(struct work_struct *data) if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { struct iwl_rxon_context *ctx; - bool bt_sco, bt_full_concurrent; + bool bt_full_concurrent; u8 bt_ci_compliance; u8 bt_load; u8 bt_status; @@ -3108,7 +2897,6 @@ static void iwl_bg_restart(struct work_struct *data) * re-configure the hw when we reconfigure the BT * command. */ - bt_sco = priv->bt_sco_active; bt_full_concurrent = priv->bt_full_concurrent; bt_ci_compliance = priv->bt_ci_compliance; bt_load = priv->bt_traffic_load; @@ -3116,7 +2904,6 @@ static void iwl_bg_restart(struct work_struct *data) __iwl_down(priv); - priv->bt_sco_active = bt_sco; priv->bt_full_concurrent = bt_full_concurrent; priv->bt_ci_compliance = bt_ci_compliance; priv->bt_traffic_load = bt_load; @@ -3150,6 +2937,91 @@ static void iwl_bg_rx_replenish(struct work_struct *data) mutex_unlock(&priv->mutex); } +static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type, + unsigned int wait) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + /* Not supported if we don't have PAN */ + if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) { + ret = -EOPNOTSUPP; + goto free; + } + + /* Not supported on pre-P2P firmware */ + if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes & + BIT(NL80211_IFTYPE_P2P_CLIENT))) { + ret = -EOPNOTSUPP; + goto free; + } + + mutex_lock(&priv->mutex); + + if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) { + /* + * If the PAN context is free, use the normal + * way of doing remain-on-channel offload + TX. + */ + ret = 1; + goto out; + } + + /* TODO: queue up if scanning? */ + if (test_bit(STATUS_SCANNING, &priv->status) || + priv->_agn.offchan_tx_skb) { + ret = -EBUSY; + goto out; + } + + /* + * max_scan_ie_len doesn't include the blank SSID or the header, + * so need to add that again here. 
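+	 * (The header is the 24-byte 802.11 management header and the blank
+	 * SSID element adds 2 bytes, hence the "+ 24 + 2" below.)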
+ */ + if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) { + ret = -ENOBUFS; + goto out; + } + + priv->_agn.offchan_tx_skb = skb; + priv->_agn.offchan_tx_timeout = wait; + priv->_agn.offchan_tx_chan = chan; + + ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif, + IWL_SCAN_OFFCH_TX, chan->band); + if (ret) + priv->_agn.offchan_tx_skb = NULL; + out: + mutex_unlock(&priv->mutex); + free: + if (ret < 0) + kfree_skb(skb); + + return ret; +} + +static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + int ret; + + mutex_lock(&priv->mutex); + + if (!priv->_agn.offchan_tx_skb) + return -EINVAL; + + priv->_agn.offchan_tx_skb = NULL; + + ret = iwl_scan_cancel_timeout(priv, 200); + if (ret) + ret = -EIO; + mutex_unlock(&priv->mutex); + + return ret; +} + /***************************************************************************** * * mac80211 entry point functions @@ -3178,6 +3050,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, IEEE80211_HW_SPECTRUM_MGMT | IEEE80211_HW_REPORTS_TX_ACK_STATUS; + hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; + if (!priv->cfg->base_params->broken_powersave) hw->flags |= IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; @@ -3194,8 +3068,11 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, hw->wiphy->interface_modes |= ctx->exclusive_interface_modes; } + hw->wiphy->max_remain_on_channel_duration = 1000; + hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | - WIPHY_FLAG_DISABLE_BEACON_HINTS; + WIPHY_FLAG_DISABLE_BEACON_HINTS | + WIPHY_FLAG_IBSS_RSN; /* * For now, disable PS by default because it affects @@ -3219,6 +3096,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv, priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->bands[IEEE80211_BAND_5GHZ]; + iwl_leds_init(priv); + ret = ieee80211_register_hw(priv->hw); if (ret) { IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); @@ -3263,7 +3142,7 @@ int iwlagn_mac_start(struct ieee80211_hw *hw) } } - iwl_led_start(priv); + iwlagn_led_enable(priv); out: priv->is_open = 1; @@ -3294,7 +3173,7 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw) IWL_DEBUG_MAC80211(priv, "leave\n"); } -int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct iwl_priv *priv = hw->priv; @@ -3307,7 +3186,6 @@ int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) dev_kfree_skb_any(skb); IWL_DEBUG_MACDUMP(priv, "leave\n"); - return NETDEV_TX_OK; } void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, @@ -3345,6 +3223,14 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } + /* + * To support IBSS RSN, don't program group keys in IBSS, the + * hardware will then not attempt to decrypt the frames. 
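+	 * mac80211 then handles the group-key crypto for such frames in
+	 * software instead.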
+ */ + if (vif->type == NL80211_IFTYPE_ADHOC && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta); if (sta_id == IWL_INVALID_STATION) return -EINVAL; @@ -3399,10 +3285,12 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) { struct iwl_priv *priv = hw->priv; int ret = -EINVAL; + struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", sta->addr, tid); @@ -3457,11 +3345,28 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, } break; case IEEE80211_AMPDU_TX_OPERATIONAL: + /* + * If the limit is 0, then it wasn't initialised yet, + * use the default. We can do that since we take the + * minimum below, and we don't want to go above our + * default due to hardware restrictions. + */ + if (sta_priv->max_agg_bufsize == 0) + sta_priv->max_agg_bufsize = + LINK_QUAL_AGG_FRAME_LIMIT_DEF; + + /* + * Even though in theory the peer could have different + * aggregation reorder buffer sizes for different sessions, + * our ucode doesn't allow for that and has a global limit + * for each station. Therefore, use the minimum of all the + * aggregation sessions and our default value. + */ + sta_priv->max_agg_bufsize = + min(sta_priv->max_agg_bufsize, buf_size); + if (priv->cfg->ht_params && priv->cfg->ht_params->use_rts_for_aggregation) { - struct iwl_station_priv *sta_priv = - (void *) sta->drv_priv; - /* * switch to RTS/CTS if it is the prefer protection * method for HT traffic @@ -3469,9 +3374,13 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, sta_priv->lq_sta.lq.general_params.flags |= LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; - iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), - &sta_priv->lq_sta.lq, CMD_ASYNC, false); } + + sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit = + sta_priv->max_agg_bufsize; + + iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif), + &sta_priv->lq_sta.lq, CMD_ASYNC, false); ret = 0; break; } @@ -3709,6 +3618,95 @@ done: IWL_DEBUG_MAC80211(priv, "leave\n"); } +static void iwlagn_disable_roc(struct iwl_priv *priv) +{ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; + struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel); + + lockdep_assert_held(&priv->mutex); + + if (!ctx->is_active) + return; + + ctx->staging.dev_type = RXON_DEV_TYPE_2STA; + ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; + iwl_set_rxon_channel(priv, chan, ctx); + iwl_set_flags_for_band(priv, ctx, chan->band, NULL); + + priv->_agn.hw_roc_channel = NULL; + + iwlcore_commit_rxon(priv, ctx); + + ctx->is_active = false; +} + +static void iwlagn_bg_roc_done(struct work_struct *work) +{ + struct iwl_priv *priv = container_of(work, struct iwl_priv, + _agn.hw_roc_work.work); + + mutex_lock(&priv->mutex); + ieee80211_remain_on_channel_expired(priv->hw); + iwlagn_disable_roc(priv); + mutex_unlock(&priv->mutex); +} + +static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw, + struct ieee80211_channel *channel, + enum nl80211_channel_type channel_type, + int duration) +{ + struct iwl_priv *priv = hw->priv; + int err = 0; + + if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) + return -EOPNOTSUPP; + + if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes & + 
BIT(NL80211_IFTYPE_P2P_CLIENT))) + return -EOPNOTSUPP; + + mutex_lock(&priv->mutex); + + if (priv->contexts[IWL_RXON_CTX_PAN].is_active || + test_bit(STATUS_SCAN_HW, &priv->status)) { + err = -EBUSY; + goto out; + } + + priv->contexts[IWL_RXON_CTX_PAN].is_active = true; + priv->_agn.hw_roc_channel = channel; + priv->_agn.hw_roc_chantype = channel_type; + priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024); + iwlcore_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]); + queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work, + msecs_to_jiffies(duration + 20)); + + msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */ + ieee80211_ready_on_channel(priv->hw); + + out: + mutex_unlock(&priv->mutex); + + return err; +} + +static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) +{ + struct iwl_priv *priv = hw->priv; + + if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) + return -EOPNOTSUPP; + + cancel_delayed_work_sync(&priv->_agn.hw_roc_work); + + mutex_lock(&priv->mutex); + iwlagn_disable_roc(priv); + mutex_unlock(&priv->mutex); + + return 0; +} + /***************************************************************************** * * driver setup and teardown @@ -3730,6 +3728,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv) INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config); INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); + INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done); iwl_setup_scan_deferred_work(priv); @@ -3823,6 +3822,8 @@ static int iwl_init_drv(struct iwl_priv *priv) priv->force_reset[IWL_FW_RESET].reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD; + priv->rx_statistics_jiffies = jiffies; + /* Choose which receivers/antennas to use */ if (priv->cfg->ops->hcmd->set_rxon_chain) priv->cfg->ops->hcmd->set_rxon_chain(priv, @@ -3876,7 +3877,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv) kfree(priv->scan_cmd); } -#ifdef CONFIG_IWL5000 struct ieee80211_ops iwlagn_hw_ops = { .tx = iwlagn_mac_tx, .start = iwlagn_mac_start, @@ -3898,14 +3898,17 @@ struct ieee80211_ops iwlagn_hw_ops = { .channel_switch = iwlagn_mac_channel_switch, .flush = iwlagn_mac_flush, .tx_last_beacon = iwl_mac_tx_last_beacon, + .remain_on_channel = iwl_mac_remain_on_channel, + .cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel, + .offchannel_tx = iwl_mac_offchannel_tx, + .offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait, }; -#endif static void iwl_hw_detect(struct iwl_priv *priv) { priv->hw_rev = _iwl_read32(priv, CSR_HW_REV); priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG); - pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id); + priv->rev_id = priv->pci_dev->revision; IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id); } @@ -3967,12 +3970,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (cfg->mod_params->disable_hw_scan) { dev_printk(KERN_DEBUG, &(pdev->dev), "sw scan support is deprecated\n"); -#ifdef CONFIG_IWL5000 iwlagn_hw_ops.hw_scan = NULL; -#endif -#ifdef CONFIG_IWL4965 - iwl4965_hw_ops.hw_scan = NULL; -#endif } hw = iwl_alloc_all(cfg); @@ -4025,6 +4023,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE; priv->contexts[IWL_RXON_CTX_PAN].interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); +#ifdef CONFIG_IWL_P2P + 
priv->contexts[IWL_RXON_CTX_PAN].interface_modes |= + BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); +#endif priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; @@ -4272,6 +4274,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) * we need to set STATUS_EXIT_PENDING bit. */ set_bit(STATUS_EXIT_PENDING, &priv->status); + + iwl_leds_exit(priv); + if (priv->mac80211_registered) { ieee80211_unregister_hw(priv->hw); priv->mac80211_registered = 0; @@ -4344,12 +4349,6 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) /* Hardware specific file defines the PCI IDs table for that hardware module */ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { -#ifdef CONFIG_IWL4965 - {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)}, - {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, -#endif /* CONFIG_IWL4965 */ -#ifdef CONFIG_IWL5000 -/* 5100 Series WiFi */ {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */ {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */ @@ -4492,7 +4491,48 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)}, {IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)}, -#endif /* CONFIG_IWL5000 */ +/* 2x00 Series */ + {IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)}, + +/* 2x30 Series */ + {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)}, + {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)}, + +/* 6x35 Series */ + {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)}, + {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)}, + {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)}, + +/* 200 Series */ + {IWL_PCI_DEVICE(0x0894, 0x0022, iwl200_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0895, 0x0222, iwl200_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0422, iwl200_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0026, iwl200_bg_cfg)}, + {IWL_PCI_DEVICE(0x0895, 0x0226, iwl200_bg_cfg)}, + {IWL_PCI_DEVICE(0x0894, 0x0426, iwl200_bg_cfg)}, + +/* 230 Series */ + {IWL_PCI_DEVICE(0x0892, 0x0062, iwl230_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0893, 0x0262, iwl230_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0462, iwl230_bgn_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0066, iwl230_bg_cfg)}, + {IWL_PCI_DEVICE(0x0893, 0x0266, iwl230_bg_cfg)}, + {IWL_PCI_DEVICE(0x0892, 0x0466, iwl230_bg_cfg)}, {0} }; @@ -4592,3 +4632,9 @@ MODULE_PARM_DESC(antenna_coupling, module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO); MODULE_PARM_DESC(bt_ch_inhibition, "Disable BT channel inhibition (default: enable)"); + 
+module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO); +MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])"); + +module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO); +MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])"); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index da303585f801..20f8e4188994 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -96,6 +96,17 @@ extern struct iwl_cfg iwl100_bgn_cfg; extern struct iwl_cfg iwl100_bg_cfg; extern struct iwl_cfg iwl130_bgn_cfg; extern struct iwl_cfg iwl130_bg_cfg; +extern struct iwl_cfg iwl2000_2bgn_cfg; +extern struct iwl_cfg iwl2000_2bg_cfg; +extern struct iwl_cfg iwl2030_2bgn_cfg; +extern struct iwl_cfg iwl2030_2bg_cfg; +extern struct iwl_cfg iwl6035_2agn_cfg; +extern struct iwl_cfg iwl6035_2abg_cfg; +extern struct iwl_cfg iwl6035_2bg_cfg; +extern struct iwl_cfg iwl200_bg_cfg; +extern struct iwl_cfg iwl200_bgn_cfg; +extern struct iwl_cfg iwl230_bg_cfg; +extern struct iwl_cfg iwl230_bgn_cfg; extern struct iwl_mod_params iwlagn_mod_params; extern struct iwl_hcmd_ops iwlagn_hcmd; @@ -110,8 +121,6 @@ void iwl_disable_ict(struct iwl_priv *priv); int iwl_alloc_isr_ict(struct iwl_priv *priv); void iwl_free_isr_ict(struct iwl_priv *priv); irqreturn_t iwl_isr_ict(int irq, void *data); -bool iwl_good_ack_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); /* tx queue */ void iwlagn_set_wr_ptrs(struct iwl_priv *priv, @@ -181,11 +190,7 @@ void iwlagn_rx_replenish_now(struct iwl_priv *priv); void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); int iwlagn_rxq_stop(struct iwl_priv *priv); int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); -void iwlagn_rx_reply_rx(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_rx_handle(struct iwl_priv *priv); +void iwl_setup_rx_handlers(struct iwl_priv *priv); /* tx */ void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); @@ -235,16 +240,6 @@ static inline bool iwl_is_tx_success(u32 status) u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); -/* rx */ -void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -bool iwl_good_plcp_health(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); -void iwl_rx_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_reply_statistics(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); - /* scan */ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif); void iwlagn_post_scan(struct iwl_priv *priv); @@ -330,8 +325,23 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); +/* notification wait support */ +void __acquires(wait_entry) +iwlagn_init_notification_wait(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry, + void (*fn)(struct iwl_priv *priv, + struct iwl_rx_packet *pkt), + u8 cmd); +signed long __releases(wait_entry) +iwlagn_wait_notification(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry, + unsigned long timeout); +void __releases(wait_entry) +iwlagn_remove_notification(struct iwl_priv *priv, + struct iwl_notification_wait *wait_entry); + /* mac80211 handlers 
(for 4965) */ -int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); int iwlagn_mac_start(struct ieee80211_hw *hw); void iwlagn_mac_stop(struct ieee80211_hw *hw); void iwlagn_configure_filter(struct ieee80211_hw *hw, @@ -349,7 +359,8 @@ void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn); + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); int iwlagn_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index f893d4a6aa87..ca42ffa63ed7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -178,7 +178,6 @@ enum { REPLY_BT_COEX_PRIO_TABLE = 0xcc, REPLY_BT_COEX_PROT_ENV = 0xcd, REPLY_BT_COEX_PROFILE_NOTIF = 0xce, - REPLY_BT_COEX_SCO = 0xcf, /* PAN commands */ REPLY_WIPAN_PARAMS = 0xb2, @@ -189,6 +188,7 @@ enum { REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */ REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9, REPLY_WIPAN_NOA_NOTIFICATION = 0xbc, + REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd, REPLY_MAX = 0xff }; @@ -2477,7 +2477,7 @@ struct iwl_bt_cmd { IWLAGN_BT_VALID_BT4_TIMES | \ IWLAGN_BT_VALID_3W_LUT) -struct iwlagn_bt_cmd { +struct iwl_basic_bt_cmd { u8 flags; u8 ledtime; /* unused */ u8 max_kill; @@ -2490,6 +2490,10 @@ struct iwlagn_bt_cmd { __le32 bt3_lookup_table[12]; __le16 bt4_decision_time; /* unused */ __le16 valid; +}; + +struct iwl6000_bt_cmd { + struct iwl_basic_bt_cmd basic; u8 prio_boost; /* * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask @@ -2499,6 +2503,18 @@ struct iwlagn_bt_cmd { __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ }; +struct iwl2000_bt_cmd { + struct iwl_basic_bt_cmd basic; + __le32 prio_boost; + /* + * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask + * if configure the following patterns + */ + u8 reserved; + u8 tx_prio_boost; /* SW boost of WiFi tx priority */ + __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ +}; + #define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0)) struct iwlagn_bt_sco_cmd { @@ -2948,9 +2964,15 @@ struct iwl3945_scan_cmd { u8 data[0]; } __packed; +enum iwl_scan_flags { + /* BIT(0) currently unused */ + IWL_SCAN_FLAGS_ACTION_FRAME_TX = BIT(1), + /* bits 2-7 reserved */ +}; + struct iwl_scan_cmd { __le16 len; - u8 reserved0; + u8 scan_flags; /* scan flags: see enum iwl_scan_flags */ u8 channel_count; /* # channels in channel list */ __le16 quiet_time; /* dwell only this # millisecs on quiet channel * (only for active scan) */ @@ -3082,6 +3104,13 @@ struct iwl4965_beacon_notif { __le32 ibss_mgr_status; } __packed; +struct iwlagn_beacon_notif { + struct iwlagn_tx_resp beacon_notify_hdr; + __le32 low_tsf; + __le32 high_tsf; + __le32 ibss_mgr_status; +} __packed; + /* * REPLY_TX_BEACON = 0x91 (command, has simple generic response) */ @@ -4143,6 +4172,10 @@ enum iwl_bt_coex_profile_traffic_load { */ }; +#define BT_SESSION_ACTIVITY_1_UART_MSG 0x1 +#define BT_SESSION_ACTIVITY_2_UART_MSG 0x2 + +/* BT UART message - Share Part (BT -> WiFi) */ #define BT_UART_MSG_FRAME1MSGTYPE_POS (0) #define BT_UART_MSG_FRAME1MSGTYPE_MSK \ (0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS) @@ -4227,9 +4260,12 @@ enum iwl_bt_coex_profile_traffic_load { #define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0) 
#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \ (0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS) -#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS (3) -#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK \ - (0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS) +#define BT_UART_MSG_FRAME7PAGE_POS (3) +#define BT_UART_MSG_FRAME7PAGE_MSK \ + (0x1 << BT_UART_MSG_FRAME7PAGE_POS) +#define BT_UART_MSG_FRAME7INQUIRY_POS (4) +#define BT_UART_MSG_FRAME7INQUIRY_MSK \ + (0x1 << BT_UART_MSG_FRAME7INQUIRY_POS) #define BT_UART_MSG_FRAME7CONNECTABLE_POS (5) #define BT_UART_MSG_FRAME7CONNECTABLE_MSK \ (0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS) @@ -4237,6 +4273,83 @@ enum iwl_bt_coex_profile_traffic_load { #define BT_UART_MSG_FRAME7RESERVED_MSK \ (0x3 << BT_UART_MSG_FRAME7RESERVED_POS) +/* BT Session Activity 2 UART message (BT -> WiFi) */ +#define BT_UART_MSG_2_FRAME1RESERVED1_POS (5) +#define BT_UART_MSG_2_FRAME1RESERVED1_MSK \ + (0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS) +#define BT_UART_MSG_2_FRAME1RESERVED2_POS (6) +#define BT_UART_MSG_2_FRAME1RESERVED2_MSK \ + (0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS) + +#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS (0) +#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK \ + (0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS) +#define BT_UART_MSG_2_FRAME2RESERVED_POS (6) +#define BT_UART_MSG_2_FRAME2RESERVED_MSK \ + (0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS) + +#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS (0) +#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK \ + (0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS) +#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS (4) +#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK \ + (0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS) +#define BT_UART_MSG_2_FRAME3LEMASTER_POS (5) +#define BT_UART_MSG_2_FRAME3LEMASTER_MSK \ + (0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS) +#define BT_UART_MSG_2_FRAME3RESERVED_POS (6) +#define BT_UART_MSG_2_FRAME3RESERVED_MSK \ + (0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS) + +#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS (0) +#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK \ + (0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS) +#define BT_UART_MSG_2_FRAME4NUMLECONN_POS (4) +#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK \ + (0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS) +#define BT_UART_MSG_2_FRAME4RESERVED_POS (6) +#define BT_UART_MSG_2_FRAME4RESERVED_MSK \ + (0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS) + +#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS (0) +#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK \ + (0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS) +#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS (4) +#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK \ + (0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS) +#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS (5) +#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK \ + (0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS) +#define BT_UART_MSG_2_FRAME5RESERVED_POS (6) +#define BT_UART_MSG_2_FRAME5RESERVED_MSK \ + (0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS) + +#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS (0) +#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK \ + (0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS) +#define BT_UART_MSG_2_FRAME6RFU_POS (5) +#define BT_UART_MSG_2_FRAME6RFU_MSK \ + (0x1<<BT_UART_MSG_2_FRAME6RFU_POS) +#define BT_UART_MSG_2_FRAME6RESERVED_POS (6) +#define BT_UART_MSG_2_FRAME6RESERVED_MSK \ + (0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS) + +#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS (0) +#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK \ + (0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS) +#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS (3) +#define 
BT_UART_MSG_2_FRAME7LEPROFILE1_MSK \ + (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS) +#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS (4) +#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK \ + (0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS) +#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS (5) +#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK \ + (0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS) +#define BT_UART_MSG_2_FRAME7RESERVED_POS (6) +#define BT_UART_MSG_2_FRAME7RESERVED_MSK \ + (0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS) + struct iwl_bt_uart_msg { u8 header; @@ -4369,6 +4482,11 @@ int iwl_agn_check_rxon_cmd(struct iwl_priv *priv); * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification) */ +/* + * Minimum slot time in TU + */ +#define IWL_MIN_SLOT_TIME 20 + /** * struct iwl_wipan_slot * @width: Time in TU diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index efbde1f1a8bf..6c30fa652e27 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -43,11 +43,6 @@ #include "iwl-helpers.h" -MODULE_DESCRIPTION("iwl core"); -MODULE_VERSION(IWLWIFI_VERSION); -MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); -MODULE_LICENSE("GPL"); - /* * set bt_coex_active to true, uCode will do kill/defer * every time the priority line is asserted (BT is sending signals on the @@ -65,15 +60,12 @@ MODULE_LICENSE("GPL"); * default: bt_coex_active = true (BT_COEX_ENABLE) */ bool bt_coex_active = true; -EXPORT_SYMBOL_GPL(bt_coex_active); module_param(bt_coex_active, bool, S_IRUGO); MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist"); u32 iwl_debug_level; -EXPORT_SYMBOL(iwl_debug_level); const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; -EXPORT_SYMBOL(iwl_bcast_addr); /* This function both allocates and initializes hw and priv. 
*/ @@ -98,7 +90,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg) out: return hw; } -EXPORT_SYMBOL(iwl_alloc_all); #define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ #define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ @@ -219,15 +210,12 @@ int iwlcore_init_geos(struct iwl_priv *priv) if (!is_channel_valid(ch)) continue; - if (is_channel_a_band(ch)) - sband = &priv->bands[IEEE80211_BAND_5GHZ]; - else - sband = &priv->bands[IEEE80211_BAND_2GHZ]; + sband = &priv->bands[ch->band]; geo_ch = &sband->channels[sband->n_channels++]; geo_ch->center_freq = - ieee80211_channel_to_frequency(ch->channel); + ieee80211_channel_to_frequency(ch->channel, ch->band); geo_ch->max_power = ch->max_power_avg; geo_ch->max_antenna_gain = 0xff; geo_ch->hw_value = ch->channel; @@ -275,7 +263,6 @@ int iwlcore_init_geos(struct iwl_priv *priv) return 0; } -EXPORT_SYMBOL(iwlcore_init_geos); /* * iwlcore_free_geos - undo allocations in iwlcore_init_geos @@ -286,7 +273,6 @@ void iwlcore_free_geos(struct iwl_priv *priv) kfree(priv->ieee_rates); clear_bit(STATUS_GEO_CONFIGURED, &priv->status); } -EXPORT_SYMBOL(iwlcore_free_geos); static bool iwl_is_channel_extension(struct iwl_priv *priv, enum ieee80211_band band, @@ -331,7 +317,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, le16_to_cpu(ctx->staging.channel), ctx->ht.extension_chan_offset); } -EXPORT_SYMBOL(iwl_is_ht40_tx_allowed); static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val) { @@ -432,7 +417,6 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) return iwl_send_cmd_pdu(priv, ctx->rxon_timing_cmd, sizeof(ctx->timing), &ctx->timing); } -EXPORT_SYMBOL(iwl_send_rxon_timing); void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, int hw_decrypt) @@ -445,7 +429,6 @@ void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; } -EXPORT_SYMBOL(iwl_set_rxon_hwcrypto); /* validate RXON structure is valid */ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) @@ -518,7 +501,6 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) } return 0; } -EXPORT_SYMBOL(iwl_check_rxon_cmd); /** * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed @@ -582,7 +564,6 @@ int iwl_full_rxon_required(struct iwl_priv *priv, return 0; } -EXPORT_SYMBOL(iwl_full_rxon_required); u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv, struct iwl_rxon_context *ctx) @@ -596,7 +577,6 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv, else return IWL_RATE_6M_PLCP; } -EXPORT_SYMBOL(iwl_rate_get_lowest_plcp); static void _iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf, @@ -673,7 +653,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf) for_each_context(priv, ctx) _iwl_set_rxon_ht(priv, ht_conf, ctx); } -EXPORT_SYMBOL(iwl_set_rxon_ht); /* Return valid, unused, channel for a passive scan to reset the RF */ u8 iwl_get_single_channel_number(struct iwl_priv *priv, @@ -714,7 +693,6 @@ u8 iwl_get_single_channel_number(struct iwl_priv *priv, return channel; } -EXPORT_SYMBOL(iwl_get_single_channel_number); /** * iwl_set_rxon_channel - Set the band and channel values in staging RXON @@ -745,7 +723,6 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, return 0; } -EXPORT_SYMBOL(iwl_set_rxon_channel); void iwl_set_flags_for_band(struct iwl_priv *priv, struct iwl_rxon_context *ctx, @@ -769,7 +746,6 @@ void 
iwl_set_flags_for_band(struct iwl_priv *priv, ctx->staging.flags &= ~RXON_FLG_CCK_MSK; } } -EXPORT_SYMBOL(iwl_set_flags_for_band); /* * initialize rxon structure with default values from eeprom @@ -841,7 +817,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; } -EXPORT_SYMBOL(iwl_connection_init_rx_config); void iwl_set_rate(struct iwl_priv *priv) { @@ -874,7 +849,6 @@ void iwl_set_rate(struct iwl_priv *priv) (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; } } -EXPORT_SYMBOL(iwl_set_rate); void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) { @@ -894,35 +868,6 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success) mutex_unlock(&priv->mutex); } } -EXPORT_SYMBOL(iwl_chswitch_done); - -void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_csa_notification *csa = &(pkt->u.csa_notif); - /* - * MULTI-FIXME - * See iwl_mac_channel_switch. - */ - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_rxon_cmd *rxon = (void *)&ctx->active; - - if (priv->switch_rxon.switch_in_progress) { - if (!le32_to_cpu(csa->status) && - (csa->channel == priv->switch_rxon.channel)) { - rxon->channel = csa->channel; - ctx->staging.channel = csa->channel; - IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", - le16_to_cpu(csa->channel)); - iwl_chswitch_done(priv, true); - } else { - IWL_ERR(priv, "CSA notif (fail) : channel %d\n", - le16_to_cpu(csa->channel)); - iwl_chswitch_done(priv, false); - } - } -} -EXPORT_SYMBOL(iwl_rx_csa); #ifdef CONFIG_IWLWIFI_DEBUG void iwl_print_rx_config_cmd(struct iwl_priv *priv, @@ -944,13 +889,15 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv, IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); } -EXPORT_SYMBOL(iwl_print_rx_config_cmd); #endif /** * iwl_irq_handle_error - called for HW or SW error interrupt from card */ void iwl_irq_handle_error(struct iwl_priv *priv) { + unsigned int reload_msec; + unsigned long reload_jiffies; + /* Set the FW error flag -- cleared on iwl_down */ set_bit(STATUS_FW_ERROR, &priv->status); @@ -994,6 +941,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv) * commands by clearing the INIT status bit */ clear_bit(STATUS_READY, &priv->status); + /* + * If firmware keep reloading, then it indicate something + * serious wrong and firmware having problem to recover + * from it. 
Instead of keep trying which will fill the syslog + * and hang the system, let's just stop it + */ + reload_jiffies = jiffies; + reload_msec = jiffies_to_msecs((long) reload_jiffies - + (long) priv->reload_jiffies); + priv->reload_jiffies = reload_jiffies; + if (reload_msec <= IWL_MIN_RELOAD_DURATION) { + priv->reload_count++; + if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) { + IWL_ERR(priv, "BUG_ON, Stop restarting\n"); + return; + } + } else + priv->reload_count = 0; + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { IWL_DEBUG(priv, IWL_DL_FW_ERRORS, "Restarting adapter due to uCode error.\n"); @@ -1002,7 +968,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv) queue_work(priv->workqueue, &priv->restart); } } -EXPORT_SYMBOL(iwl_irq_handle_error); static int iwl_apm_stop_master(struct iwl_priv *priv) { @@ -1039,7 +1004,6 @@ void iwl_apm_stop(struct iwl_priv *priv) */ iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } -EXPORT_SYMBOL(iwl_apm_stop); /* @@ -1154,13 +1118,14 @@ int iwl_apm_init(struct iwl_priv *priv) out: return ret; } -EXPORT_SYMBOL(iwl_apm_init); int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) { int ret; s8 prev_tx_power; + bool defer; + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; lockdep_assert_held(&priv->mutex); @@ -1188,10 +1153,15 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) if (!iwl_is_ready_rf(priv)) return -EIO; - /* scan complete use tx_power_next, need to be updated */ + /* scan complete and commit_rxon use tx_power_next value, + * it always need to be updated for newest request */ priv->tx_power_next = tx_power; - if (test_bit(STATUS_SCANNING, &priv->status) && !force) { - IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n"); + + /* do not set tx power when scanning or channel changing */ + defer = test_bit(STATUS_SCANNING, &priv->status) || + memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); + if (defer && !force) { + IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); return 0; } @@ -1207,7 +1177,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) } return ret; } -EXPORT_SYMBOL(iwl_set_tx_power); void iwl_send_bt_config(struct iwl_priv *priv) { @@ -1231,7 +1200,6 @@ void iwl_send_bt_config(struct iwl_priv *priv) sizeof(struct iwl_bt_cmd), &bt_cmd)) IWL_ERR(priv, "failed to send BT Coex Config\n"); } -EXPORT_SYMBOL(iwl_send_bt_config); int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) { @@ -1249,46 +1217,6 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear) sizeof(struct iwl_statistics_cmd), &statistics_cmd); } -EXPORT_SYMBOL(iwl_send_statistics_request); - -void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ -#ifdef CONFIG_IWLWIFI_DEBUG - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); - IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", - sleep->pm_sleep_mode, sleep->pm_wakeup_src); -#endif -} -EXPORT_SYMBOL(iwl_rx_pm_sleep_notif); - -void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " - "notification for %s:\n", len, - get_cmd_string(pkt->hdr.cmd)); - iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); -} -EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif); - 
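The new code in iwl_irq_handle_error() rate-limits firmware restarts: reloads landing within IWL_MIN_RELOAD_DURATION of each other bump a counter, and once IWL_MAX_CONTINUE_RELOAD_CNT consecutive reloads are seen the restart is skipped (both constants are added to iwl-dev.h later in this diff). A standalone sketch of the same pattern, with the state passed in explicitly purely for illustration:

static bool example_should_restart(unsigned long *last_jiffies, int *count)
{
	unsigned long now = jiffies;
	unsigned int gap_msec = jiffies_to_msecs(now - *last_jiffies);

	*last_jiffies = now;
	if (gap_msec <= IWL_MIN_RELOAD_DURATION) {
		if (++(*count) >= IWL_MAX_CONTINUE_RELOAD_CNT)
			return false;	/* too many back-to-back reloads, give up */
	} else {
		*count = 0;		/* quiet period, reset the counter */
	}
	return true;
}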
-void iwl_rx_reply_error(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - - IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " - "seq 0x%04X ser 0x%08X\n", - le32_to_cpu(pkt->u.err_resp.error_type), - get_cmd_string(pkt->u.err_resp.cmd_id), - pkt->u.err_resp.cmd_id, - le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), - le32_to_cpu(pkt->u.err_resp.error_info)); -} -EXPORT_SYMBOL(iwl_rx_reply_error); void iwl_clear_isr_stats(struct iwl_priv *priv) { @@ -1340,7 +1268,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, IWL_DEBUG_MAC80211(priv, "leave\n"); return 0; } -EXPORT_SYMBOL(iwl_mac_conf_tx); int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw) { @@ -1348,7 +1275,6 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw) return priv->ibss_manager == IWL_IBSS_MANAGER; } -EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon); static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { @@ -1403,9 +1329,10 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; struct iwl_rxon_context *tmp, *ctx = NULL; int err; + enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", - vif->type, vif->addr); + viftype, vif->addr); mutex_lock(&priv->mutex); @@ -1429,7 +1356,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) continue; } - if (!(possible_modes & BIT(vif->type))) + if (!(possible_modes & BIT(viftype))) continue; /* have maybe usable context w/o interface */ @@ -1457,7 +1384,6 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) IWL_DEBUG_MAC80211(priv, "leave\n"); return err; } -EXPORT_SYMBOL(iwl_mac_add_interface); static void iwl_teardown_interface(struct iwl_priv *priv, struct ieee80211_vif *vif, @@ -1510,7 +1436,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(priv, "leave\n"); } -EXPORT_SYMBOL(iwl_mac_remove_interface); int iwl_alloc_txq_mem(struct iwl_priv *priv) { @@ -1525,14 +1450,12 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv) } return 0; } -EXPORT_SYMBOL(iwl_alloc_txq_mem); void iwl_free_txq_mem(struct iwl_priv *priv) { kfree(priv->txq); priv->txq = NULL; } -EXPORT_SYMBOL(iwl_free_txq_mem); #ifdef CONFIG_IWLWIFI_DEBUGFS @@ -1571,7 +1494,6 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv) iwl_reset_traffic_log(priv); return 0; } -EXPORT_SYMBOL(iwl_alloc_traffic_mem); void iwl_free_traffic_mem(struct iwl_priv *priv) { @@ -1581,7 +1503,6 @@ void iwl_free_traffic_mem(struct iwl_priv *priv) kfree(priv->rx_traffic); priv->rx_traffic = NULL; } -EXPORT_SYMBOL(iwl_free_traffic_mem); void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, u16 length, struct ieee80211_hdr *header) @@ -1606,7 +1527,6 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv, (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; } } -EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame); void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, u16 length, struct ieee80211_hdr *header) @@ -1631,7 +1551,6 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES; } } -EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame); const char *get_mgmt_string(int cmd) { @@ -1675,7 +1594,6 @@ void iwl_clear_traffic_stats(struct iwl_priv *priv) { memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); - priv->led_tpt = 0; } /* @@ -1768,9 +1686,7 @@ void 
iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) stats->data_cnt++; stats->data_bytes += len; } - iwl_leds_background(priv); } -EXPORT_SYMBOL(iwl_update_stats); #endif static void iwl_force_rf_reset(struct iwl_priv *priv) @@ -1909,7 +1825,6 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mutex_unlock(&priv->mutex); return err; } -EXPORT_SYMBOL(iwl_mac_change_interface); /* * On every watchdog tick we check (latest) time stamp. If it does not @@ -1981,7 +1896,6 @@ void iwl_bg_watchdog(unsigned long data) mod_timer(&priv->watchdog, jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout))); } -EXPORT_SYMBOL(iwl_bg_watchdog); void iwl_setup_watchdog(struct iwl_priv *priv) { @@ -1993,7 +1907,6 @@ void iwl_setup_watchdog(struct iwl_priv *priv) else del_timer(&priv->watchdog); } -EXPORT_SYMBOL(iwl_setup_watchdog); /* * extended beacon time format @@ -2019,7 +1932,6 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval) return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; } -EXPORT_SYMBOL(iwl_usecs_to_beacons); /* base is usually what we get from ucode with each received frame, * the same as HW timer counter counting down @@ -2047,7 +1959,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, return cpu_to_le32(res); } -EXPORT_SYMBOL(iwl_add_beacon_time); #ifdef CONFIG_PM @@ -2067,7 +1978,6 @@ int iwl_pci_suspend(struct device *device) return 0; } -EXPORT_SYMBOL(iwl_pci_suspend); int iwl_pci_resume(struct device *device) { @@ -2096,7 +2006,6 @@ int iwl_pci_resume(struct device *device) return 0; } -EXPORT_SYMBOL(iwl_pci_resume); const struct dev_pm_ops iwl_pm_ops = { .suspend = iwl_pci_suspend, @@ -2106,6 +2015,5 @@ const struct dev_pm_ops iwl_pm_ops = { .poweroff = iwl_pci_suspend, .restore = iwl_pci_resume, }; -EXPORT_SYMBOL(iwl_pm_ops); #endif /* CONFIG_PM */ diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index a3474376fdbc..b316d833d9a2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -63,6 +63,8 @@ #ifndef __iwl_core_h__ #define __iwl_core_h__ +#include "iwl-dev.h" + /************************ * forward declarations * ************************/ @@ -210,12 +212,7 @@ struct iwl_lib_ops { /* temperature */ struct iwl_temp_ops temp_ops; - /* check for plcp health */ - bool (*check_plcp_health)(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); - /* check for ack health */ - bool (*check_ack_health)(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); + int (*txfifo_flush)(struct iwl_priv *priv, u16 flush_control); void (*dev_txfifo_flush)(struct iwl_priv *priv, u16 flush_control); @@ -227,8 +224,6 @@ struct iwl_lib_ops { struct iwl_led_ops { int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd); - int (*on)(struct iwl_priv *priv); - int (*off)(struct iwl_priv *priv); }; /* NIC specific ops */ @@ -263,6 +258,8 @@ struct iwl_mod_params { int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ int antenna; /* def: 0 = both antennas (use diversity) */ int restart_fw; /* def: 1 = restart firmware */ + bool plcp_check; /* def: true = enable plcp health check */ + bool ack_check; /* def: false = disable ack health check */ }; /* @@ -307,7 +304,6 @@ struct iwl_base_params { u16 led_compensation; const bool broken_powersave; int chain_noise_num_beacons; - const bool supports_idle; bool adv_thermal_throttle; bool support_ct_kill_exit; const bool support_wimax_coexist; @@ -342,6 +338,7 @@ struct 
iwl_bt_params { u8 ampdu_factor; u8 ampdu_density; bool bt_sco_disable; + bool bt_session_2; }; /* * @use_rts_for_aggregation: use rts/cts protection for HT traffic @@ -366,6 +363,7 @@ struct iwl_ht_params { * @adv_pm: advance power management * @rx_with_siso_diversity: 1x1 device with rx antenna diversity * @internal_wimax_coex: internal wifi/wimax combo device + * @iq_invert: I/Q inversion * * We enable the driver to be backward compatible wrt API version. The * driver specifies which APIs it supports (with @ucode_api_max being the @@ -415,6 +413,7 @@ struct iwl_cfg { const bool adv_pm; const bool rx_with_siso_diversity; const bool internal_wimax_coex; + const bool iq_invert; }; /*************************** @@ -444,10 +443,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv, void iwl_connection_init_rx_config(struct iwl_priv *priv, struct iwl_rxon_context *ctx); void iwl_set_rate(struct iwl_priv *priv); -int iwl_set_decrypted_flag(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - u32 decrypt_res, - struct ieee80211_rx_status *stats); void iwl_irq_handle_error(struct iwl_priv *priv); int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); @@ -494,46 +489,21 @@ static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv, static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len) { - struct traffic_stats *stats; - - if (is_tx) - stats = &priv->tx_stats; - else - stats = &priv->rx_stats; - - if (ieee80211_is_data(fc)) { - /* data */ - stats->data_bytes += len; - } - iwl_leds_background(priv); } #endif -/***************************************************** - * RX handlers. - * **************************************************/ -void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_rx_reply_error(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); /***************************************************** * RX ******************************************************/ void iwl_cmd_queue_free(struct iwl_priv *priv); +void iwl_cmd_queue_unmap(struct iwl_priv *priv); int iwl_rx_queue_alloc(struct iwl_priv *priv); void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q); int iwl_rx_queue_space(const struct iwl_rx_queue *q); void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); -/* Handlers */ -void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, - struct iwl_rx_mem_buffer *rxb); -void iwl_recover_from_statistics(struct iwl_priv *priv, - struct iwl_rx_packet *pkt); + void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); -void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); /* TX helpers */ @@ -546,6 +516,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, int slots_num, u32 txq_id); void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); +void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id); void iwl_setup_watchdog(struct iwl_priv *priv); /***************************************************** * TX power @@ -582,6 +553,10 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, struct ieee80211_vif *vif); void iwl_setup_scan_deferred_work(struct iwl_priv *priv); void iwl_cancel_scan_deferred_work(struct iwl_priv *priv); +int __must_check iwl_scan_initiate(struct iwl_priv *priv, + struct ieee80211_vif *vif, + 
enum iwl_scan_type scan_type, + enum ieee80211_band band); /* For faster active scanning, scan will move to the next channel if fewer than * PLCP_QUIET_THRESH packets are heard on this channel within @@ -755,6 +730,17 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode( return priv->hw->wiphy->bands[band]; } +static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv) +{ + return priv->cfg->bt_params && + priv->cfg->bt_params->advanced_bt_coexist; +} + +static inline bool iwl_bt_statistics(struct iwl_priv *priv) +{ + return priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics; +} + extern bool bt_coex_active; extern bool bt_siso_mode; diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h index b80bf7dff55b..f52bc040bcbf 100644 --- a/drivers/net/wireless/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/iwlwifi/iwl-csr.h @@ -290,7 +290,7 @@ /* HW REV */ -#define CSR_HW_REV_TYPE_MSK (0x00000F0) +#define CSR_HW_REV_TYPE_MSK (0x00001F0) #define CSR_HW_REV_TYPE_3945 (0x00000D0) #define CSR_HW_REV_TYPE_4965 (0x0000000) #define CSR_HW_REV_TYPE_5300 (0x0000020) @@ -300,9 +300,15 @@ #define CSR_HW_REV_TYPE_1000 (0x0000060) #define CSR_HW_REV_TYPE_6x00 (0x0000070) #define CSR_HW_REV_TYPE_6x50 (0x0000080) -#define CSR_HW_REV_TYPE_6x50g2 (0x0000084) -#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0) -#define CSR_HW_REV_TYPE_NONE (0x00000F0) +#define CSR_HW_REV_TYPE_6150 (0x0000084) +#define CSR_HW_REV_TYPE_6x05 (0x00000B0) +#define CSR_HW_REV_TYPE_6x30 CSR_HW_REV_TYPE_6x05 +#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05 +#define CSR_HW_REV_TYPE_2x30 (0x00000C0) +#define CSR_HW_REV_TYPE_2x00 (0x0000100) +#define CSR_HW_REV_TYPE_200 (0x0000110) +#define CSR_HW_REV_TYPE_230 (0x0000120) +#define CSR_HW_REV_TYPE_NONE (0x00001F0) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) @@ -376,6 +382,8 @@ #define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004) #define CSR_GP_DRIVER_REG_BIT_6050_1x2 (0x00000008) +#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER (0x00000080) + /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) #define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index 6fe80b5e7a15..8842411f1cf3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -207,18 +207,19 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file, return ret; } -#define BYTE1_MASK 0x000000ff; -#define BYTE2_MASK 0x0000ffff; -#define BYTE3_MASK 0x00ffffff; static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - u32 val; + u32 val = 0; char *buf; ssize_t ret; - int i; + int i = 0; + bool device_format = false; + int offset = 0; + int len = 0; int pos = 0; + int sram; struct iwl_priv *priv = file->private_data; size_t bufsz; @@ -230,35 +231,62 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, else priv->dbgfs_sram_len = priv->ucode_data.len; } - bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10; + len = priv->dbgfs_sram_len; + + if (len == -4) { + device_format = true; + len = 4; + } + + bufsz = 50 + len * 4; buf = kmalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; + pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n", - priv->dbgfs_sram_len); + len); pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n", 
priv->dbgfs_sram_offset); - for (i = priv->dbgfs_sram_len; i > 0; i -= 4) { - val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \ - priv->dbgfs_sram_len - i); - if (i < 4) { - switch (i) { - case 1: - val &= BYTE1_MASK; - break; - case 2: - val &= BYTE2_MASK; - break; - case 3: - val &= BYTE3_MASK; - break; - } + + /* adjust sram address since reads are only on even u32 boundaries */ + offset = priv->dbgfs_sram_offset & 0x3; + sram = priv->dbgfs_sram_offset & ~0x3; + + /* read the first u32 from sram */ + val = iwl_read_targ_mem(priv, sram); + + for (; len; len--) { + /* put the address at the start of every line */ + if (i == 0) + pos += scnprintf(buf + pos, bufsz - pos, + "%08X: ", sram + offset); + + if (device_format) + pos += scnprintf(buf + pos, bufsz - pos, + "%02x", (val >> (8 * (3 - offset))) & 0xff); + else + pos += scnprintf(buf + pos, bufsz - pos, + "%02x ", (val >> (8 * offset)) & 0xff); + + /* if all bytes processed, read the next u32 from sram */ + if (++offset == 4) { + sram += 4; + offset = 0; + val = iwl_read_targ_mem(priv, sram); } - if (!(i % 16)) + + /* put in extra spaces and split lines for human readability */ + if (++i == 16) { + i = 0; pos += scnprintf(buf + pos, bufsz - pos, "\n"); - pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); + } else if (!(i & 7)) { + pos += scnprintf(buf + pos, bufsz - pos, " "); + } else if (!(i & 3)) { + pos += scnprintf(buf + pos, bufsz - pos, " "); + } } - pos += scnprintf(buf + pos, bufsz - pos, "\n"); + if (i) + pos += scnprintf(buf + pos, bufsz - pos, "\n"); ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); @@ -282,6 +310,9 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file, if (sscanf(buf, "%x,%x", &offset, &len) == 2) { priv->dbgfs_sram_offset = offset; priv->dbgfs_sram_len = len; + } else if (sscanf(buf, "%x", &offset) == 1) { + priv->dbgfs_sram_offset = offset; + priv->dbgfs_sram_len = -4; } else { priv->dbgfs_sram_offset = 0; priv->dbgfs_sram_len = 0; @@ -668,29 +699,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf, return simple_read_from_buffer(user_buf, count, ppos, buf, pos); } -static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_priv *priv = file->private_data; - int pos = 0; - char buf[256]; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, - "allow blinking: %s\n", - (priv->allow_blinking) ? "True" : "False"); - if (priv->allow_blinking) { - pos += scnprintf(buf + pos, bufsz - pos, - "Led blinking rate: %u\n", - priv->last_blink_rate); - pos += scnprintf(buf + pos, bufsz - pos, - "Last blink time: %lu\n", - priv->last_blink_time); - } - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -856,7 +864,6 @@ DEBUGFS_READ_FILE_OPS(channels); DEBUGFS_READ_FILE_OPS(status); DEBUGFS_READ_WRITE_FILE_OPS(interrupt); DEBUGFS_READ_FILE_OPS(qos); -DEBUGFS_READ_FILE_OPS(led); DEBUGFS_READ_FILE_OPS(thermal_throttling); DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); @@ -1580,10 +1587,9 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file, "last traffic notif: %d\n", priv->bt_status ? 
"On" : "Off", priv->last_bt_traffic_load); pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, " - "sco_active: %d, kill_ack_mask: %x, " - "kill_cts_mask: %x\n", - priv->bt_ch_announce, priv->bt_sco_active, - priv->kill_ack_mask, priv->kill_cts_mask); + "kill_ack_mask: %x, kill_cts_mask: %x\n", + priv->bt_ch_announce, priv->kill_ack_mask, + priv->kill_cts_mask); pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: "); switch (priv->bt_traffic_load) { @@ -1725,7 +1731,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); - DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR); if (!priv->cfg->base_params->broken_powersave) { DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); @@ -1759,13 +1764,13 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); if (priv->cfg->base_params->ucode_tracing) DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); - if (priv->cfg->bt_params && priv->cfg->bt_params->bt_statistics) + if (iwl_bt_statistics(priv)) DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR); - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) + if (iwl_advanced_bt_coexist(priv)) DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); if (priv->cfg->base_params->sensitivity_calib_by_driver) DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, @@ -1783,7 +1788,6 @@ err: iwl_dbgfs_unregister(priv); return -ENOMEM; } -EXPORT_SYMBOL(iwl_dbgfs_register); /** * Remove the debugfs files and directories @@ -1797,7 +1801,6 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv) debugfs_remove_recursive(priv->debugfs_dir); priv->debugfs_dir = NULL; } -EXPORT_SYMBOL(iwl_dbgfs_unregister); diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 8dda67850af4..68b953f2bdc7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -34,6 +34,8 @@ #include <linux/pci.h> /* for struct pci_device_id */ #include <linux/kernel.h> +#include <linux/wait.h> +#include <linux/leds.h> #include <net/ieee80211_radiotap.h> #include "iwl-eeprom.h" @@ -41,14 +43,14 @@ #include "iwl-prph.h" #include "iwl-fh.h" #include "iwl-debug.h" -#include "iwl-4965-hw.h" -#include "iwl-3945-hw.h" #include "iwl-agn-hw.h" #include "iwl-led.h" #include "iwl-power.h" #include "iwl-agn-rs.h" #include "iwl-agn-tt.h" +#define U32_PAD(n) ((4-(n))&0x3) + struct iwl_tx_queue; /* CT-KILL constants */ @@ -136,7 +138,7 @@ struct iwl_queue { * space more than this */ int high_mark; /* high watermark, stop queue if free * space less than this */ -} __packed; +}; /* One for each TFD */ struct iwl_tx_info { @@ -507,6 +509,7 @@ struct iwl_station_priv { atomic_t pending_frames; bool client; bool asleep; + u8 max_agg_bufsize; }; /** @@ -995,7 +998,6 @@ struct reply_agg_tx_error_statistics { u32 unknown; }; -#ifdef CONFIG_IWLWIFI_DEBUGFS /* management statistics */ enum iwl_mgmt_stats { MANAGEMENT_ASSOC_REQ = 0, @@ -1026,16 +1028,13 @@ enum iwl_ctrl_stats { }; struct traffic_stats { +#ifdef CONFIG_IWLWIFI_DEBUGFS u32 mgmt[MANAGEMENT_MAX]; u32 ctrl[CONTROL_MAX]; u32 data_cnt; u64 data_bytes; -}; -#else 
-struct traffic_stats { - u64 data_bytes; -}; #endif +}; /* * iwl_switch_rxon: "channel switch" structure @@ -1111,6 +1110,11 @@ struct iwl_event_log { /* BT Antenna Coupling Threshold (dB) */ #define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) +/* Firmware reload counter and Timestamp */ +#define IWL_MIN_RELOAD_DURATION 1000 /* 1000 ms */ +#define IWL_MAX_CONTINUE_RELOAD_CNT 4 + + enum iwl_reset { IWL_RF_RESET = 0, IWL_FW_RESET, @@ -1139,6 +1143,33 @@ struct iwl_force_reset { */ #define IWLAGN_EXT_BEACON_TIME_POS 22 +/** + * struct iwl_notification_wait - notification wait entry + * @list: list head for global list + * @fn: function called with the notification + * @cmd: command ID + * + * This structure is not used directly, to wait for a + * notification declare it on the stack, and call + * iwlagn_init_notification_wait() with appropriate + * parameters. Then do whatever will cause the ucode + * to notify the driver, and to wait for that then + * call iwlagn_wait_notification(). + * + * Each notification is one-shot. If at some point we + * need to support multi-shot notifications (which + * can't be allocated on the stack) we need to modify + * the code for them. + */ +struct iwl_notification_wait { + struct list_head list; + + void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt); + + u8 cmd; + bool triggered; +}; + enum iwl_rxon_context_id { IWL_RXON_CTX_BSS, IWL_RXON_CTX_PAN, @@ -1199,6 +1230,12 @@ struct iwl_rxon_context { } ht; }; +enum iwl_scan_type { + IWL_SCAN_NORMAL, + IWL_SCAN_RADIO_RESET, + IWL_SCAN_OFFCH_TX, +}; + struct iwl_priv { /* ieee device used by generic ieee processing code */ @@ -1230,12 +1267,16 @@ struct iwl_priv { /* track IBSS manager (last beacon) status */ u32 ibss_manager; - /* storing the jiffies when the plcp error rate is received */ - unsigned long plcp_jiffies; + /* jiffies when last recovery from statistics was performed */ + unsigned long rx_statistics_jiffies; /* force reset */ struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; + /* firmware reload counter and timestamp */ + unsigned long reload_jiffies; + int reload_count; + /* we allocate array of iwl_channel_info for NIC's valid channels. 
* Access via channel # using indirect index array */ struct iwl_channel_info *channel_info; /* channel info array */ @@ -1255,7 +1296,7 @@ struct iwl_priv { enum ieee80211_band scan_band; struct cfg80211_scan_request *scan_request; struct ieee80211_vif *scan_vif; - bool is_internal_short_scan; + enum iwl_scan_type scan_type; u8 scan_tx_ant[IEEE80211_NUM_BANDS]; u8 mgmt_tx_ant; @@ -1310,11 +1351,6 @@ struct iwl_priv { struct iwl_init_alive_resp card_alive_init; struct iwl_alive_resp card_alive; - unsigned long last_blink_time; - u8 last_blink_rate; - u8 allow_blinking; - u64 led_tpt; - u16 active_rate; u8 start_calib; @@ -1463,6 +1499,21 @@ struct iwl_priv { struct iwl_bt_notif_statistics delta_statistics_bt; struct iwl_bt_notif_statistics max_delta_bt; #endif + + /* notification wait support */ + struct list_head notif_waits; + spinlock_t notif_wait_lock; + wait_queue_head_t notif_waitq; + + /* remain-on-channel offload support */ + struct ieee80211_channel *hw_roc_channel; + struct delayed_work hw_roc_work; + enum nl80211_channel_type hw_roc_chantype; + int hw_roc_duration; + + struct sk_buff *offchan_tx_skb; + int offchan_tx_timeout; + struct ieee80211_channel *offchan_tx_chan; } _agn; #endif }; @@ -1472,7 +1523,6 @@ struct iwl_priv { u8 bt_status; u8 bt_traffic_load, last_bt_traffic_load; bool bt_ch_announce; - bool bt_sco_active; bool bt_full_concurrent; bool bt_ant_couple_ok; __le32 kill_ack_mask; @@ -1547,6 +1597,10 @@ struct iwl_priv { bool hw_ready; struct iwl_event_log event_log; + + struct led_classdev led; + unsigned long blink_on, blink_off; + bool led_registered; }; /*iwl_priv */ static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 358cfd7e5af1..833194a2c639 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -222,7 +222,6 @@ const u8 *iwlcore_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) BUG_ON(offset >= priv->cfg->base_params->eeprom_size); return &priv->eeprom[offset]; } -EXPORT_SYMBOL(iwlcore_eeprom_query_addr); static int iwl_init_otp_access(struct iwl_priv *priv) { @@ -382,7 +381,6 @@ const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset) { return priv->cfg->ops->lib->eeprom_ops.query_addr(priv, offset); } -EXPORT_SYMBOL(iwl_eeprom_query_addr); u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) { @@ -390,7 +388,6 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset) return 0; return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8); } -EXPORT_SYMBOL(iwl_eeprom_query16); /** * iwl_eeprom_init - read EEPROM contents @@ -509,14 +506,12 @@ err: alloc_err: return ret; } -EXPORT_SYMBOL(iwl_eeprom_init); void iwl_eeprom_free(struct iwl_priv *priv) { kfree(priv->eeprom); priv->eeprom = NULL; } -EXPORT_SYMBOL(iwl_eeprom_free); static void iwl_init_band_reference(const struct iwl_priv *priv, int eep_band, int *eeprom_ch_count, @@ -779,7 +774,6 @@ int iwl_init_channel_map(struct iwl_priv *priv) return 0; } -EXPORT_SYMBOL(iwl_init_channel_map); /* * iwl_free_channel_map - undo allocations in iwl_init_channel_map @@ -789,7 +783,6 @@ void iwl_free_channel_map(struct iwl_priv *priv) kfree(priv->channel_info); priv->channel_count = 0; } -EXPORT_SYMBOL(iwl_free_channel_map); /** * iwl_get_channel_info - Find driver's private channel info @@ -818,4 +811,3 @@ const struct iwl_channel_info *iwl_get_channel_info(const struct 
iwl_priv *priv, return NULL; } -EXPORT_SYMBOL(iwl_get_channel_info); diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 9e6f31355eee..98aa8af01192 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -247,13 +247,26 @@ struct iwl_eeprom_enhanced_txpwr { #define EEPROM_6050_TX_POWER_VERSION (4) #define EEPROM_6050_EEPROM_VERSION (0x532) -/* 6x50g2 Specific */ -#define EEPROM_6050G2_TX_POWER_VERSION (6) -#define EEPROM_6050G2_EEPROM_VERSION (0x553) +/* 6150 Specific */ +#define EEPROM_6150_TX_POWER_VERSION (6) +#define EEPROM_6150_EEPROM_VERSION (0x553) + +/* 6x05 Specific */ +#define EEPROM_6005_TX_POWER_VERSION (6) +#define EEPROM_6005_EEPROM_VERSION (0x709) + +/* 6x30 Specific */ +#define EEPROM_6030_TX_POWER_VERSION (6) +#define EEPROM_6030_EEPROM_VERSION (0x709) + +/* 2x00 Specific */ +#define EEPROM_2000_TX_POWER_VERSION (6) +#define EEPROM_2000_EEPROM_VERSION (0x805) + +/* 6x35 Specific */ +#define EEPROM_6035_TX_POWER_VERSION (6) +#define EEPROM_6035_EEPROM_VERSION (0x753) -/* 6x00g2 Specific */ -#define EEPROM_6000G2_TX_POWER_VERSION (6) -#define EEPROM_6000G2_EEPROM_VERSION (0x709) /* OTP */ /* lower blocks contain EEPROM image and calibration data */ @@ -264,6 +277,7 @@ struct iwl_eeprom_enhanced_txpwr { #define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */ #define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */ #define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */ +#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */ /* 2.4 GHz */ extern const u8 iwl_eeprom_band_1[14]; diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c index c373b53babea..02499f684683 100644 --- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c @@ -108,12 +108,12 @@ const char *get_cmd_string(u8 cmd) IWL_CMD(REPLY_WIPAN_WEPKEY); IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); + IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE); default: return "UNKNOWN"; } } -EXPORT_SYMBOL(get_cmd_string); #define HOST_COMPLETE_TIMEOUT (HZ / 2) @@ -252,7 +252,6 @@ out: mutex_unlock(&priv->sync_cmd_mutex); return ret; } -EXPORT_SYMBOL(iwl_send_cmd_sync); int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) { @@ -261,7 +260,6 @@ int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) return iwl_send_cmd_sync(priv, cmd); } -EXPORT_SYMBOL(iwl_send_cmd); int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) { @@ -273,7 +271,6 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) return iwl_send_cmd_sync(priv, &cmd); } -EXPORT_SYMBOL(iwl_send_cmd_pdu); int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len, const void *data, @@ -292,4 +289,3 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, return iwl_send_cmd_async(priv, &cmd); } -EXPORT_SYMBOL(iwl_send_cmd_pdu_async); diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c index 46ccdf406e8e..d7f2a0bb32c9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-led.c @@ -48,31 +48,19 @@ module_param(led_mode, int, S_IRUGO); MODULE_PARM_DESC(led_mode, "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking"); -static const struct { - u16 tpt; /* Mb/s */ - u8 on_time; - u8 off_time; -} blink_tbl[] = -{ - {300, 25, 25}, - {200, 40, 40}, - {100, 55, 55}, - {70, 65, 65}, - {50, 75, 75}, - {20, 85, 
85}, - {10, 95, 95}, - {5, 110, 110}, - {1, 130, 130}, - {0, 167, 167}, - /* SOLID_ON */ - {-1, IWL_LED_SOLID, 0} +static const struct ieee80211_tpt_blink iwl_blink[] = { + { .throughput = 0 * 1024 - 1, .blink_time = 334 }, + { .throughput = 1 * 1024 - 1, .blink_time = 260 }, + { .throughput = 5 * 1024 - 1, .blink_time = 220 }, + { .throughput = 10 * 1024 - 1, .blink_time = 190 }, + { .throughput = 20 * 1024 - 1, .blink_time = 170 }, + { .throughput = 50 * 1024 - 1, .blink_time = 150 }, + { .throughput = 70 * 1024 - 1, .blink_time = 130 }, + { .throughput = 100 * 1024 - 1, .blink_time = 110 }, + { .throughput = 200 * 1024 - 1, .blink_time = 80 }, + { .throughput = 300 * 1024 - 1, .blink_time = 50 }, }; -#define IWL_1MB_RATE (128 * 1024) -#define IWL_LED_THRESHOLD (16) -#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */ -#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1) - /* * Adjust led blink rate to compensate on a MAC Clock difference on every HW * Led blink rate analysis showed an average deviation of 0% on 3945, @@ -97,133 +85,102 @@ static inline u8 iwl_blink_compensation(struct iwl_priv *priv, } /* Set led pattern command */ -static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx) +static int iwl_led_cmd(struct iwl_priv *priv, + unsigned long on, + unsigned long off) { struct iwl_led_cmd led_cmd = { .id = IWL_LED_LINK, .interval = IWL_DEF_LED_INTRVL }; + int ret; + + if (!test_bit(STATUS_READY, &priv->status)) + return -EBUSY; - BUG_ON(idx > IWL_MAX_BLINK_TBL); + if (priv->blink_on == on && priv->blink_off == off) + return 0; - IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n", + IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", priv->cfg->base_params->led_compensation); - led_cmd.on = - iwl_blink_compensation(priv, blink_tbl[idx].on_time, + led_cmd.on = iwl_blink_compensation(priv, on, priv->cfg->base_params->led_compensation); - led_cmd.off = - iwl_blink_compensation(priv, blink_tbl[idx].off_time, + led_cmd.off = iwl_blink_compensation(priv, off, priv->cfg->base_params->led_compensation); - return priv->cfg->ops->led->cmd(priv, &led_cmd); + ret = priv->cfg->ops->led->cmd(priv, &led_cmd); + if (!ret) { + priv->blink_on = on; + priv->blink_off = off; + } + return ret; } -int iwl_led_start(struct iwl_priv *priv) +static void iwl_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) { - return priv->cfg->ops->led->on(priv); -} -EXPORT_SYMBOL(iwl_led_start); + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); + unsigned long on = 0; -int iwl_led_associate(struct iwl_priv *priv) -{ - IWL_DEBUG_LED(priv, "Associated\n"); - if (priv->cfg->led_mode == IWL_LED_BLINK) - priv->allow_blinking = 1; - priv->last_blink_time = jiffies; + if (brightness > 0) + on = IWL_LED_SOLID; - return 0; + iwl_led_cmd(priv, on, 0); } -EXPORT_SYMBOL(iwl_led_associate); -int iwl_led_disassociate(struct iwl_priv *priv) +static int iwl_led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) { - priv->allow_blinking = 0; + struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); - return 0; + return iwl_led_cmd(priv, *delay_on, *delay_off); } -EXPORT_SYMBOL(iwl_led_disassociate); -/* - * calculate blink rate according to last second Tx/Rx activities - */ -static int iwl_get_blink_rate(struct iwl_priv *priv) -{ - int i; - /* count both tx and rx traffic to be able to - * handle traffic in either direction - */ - u64 current_tpt = priv->tx_stats.data_bytes + 
- priv->rx_stats.data_bytes; - s64 tpt = current_tpt - priv->led_tpt; - - if (tpt < 0) /* wraparound */ - tpt = -tpt; - - IWL_DEBUG_LED(priv, "tpt %lld current_tpt %llu\n", - (long long)tpt, - (unsigned long long)current_tpt); - priv->led_tpt = current_tpt; - - if (!priv->allow_blinking) - i = IWL_MAX_BLINK_TBL; - else - for (i = 0; i < IWL_MAX_BLINK_TBL; i++) - if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE)) - break; - - IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i); - return i; -} - -/* - * this function called from handler. Since setting Led command can - * happen very frequent we postpone led command to be called from - * REPLY handler so we know ucode is up - */ -void iwl_leds_background(struct iwl_priv *priv) +void iwl_leds_init(struct iwl_priv *priv) { - u8 blink_idx; - - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { - priv->last_blink_time = 0; - return; - } - if (iwl_is_rfkill(priv)) { - priv->last_blink_time = 0; - return; + int mode = led_mode; + int ret; + + if (mode == IWL_LED_DEFAULT) + mode = priv->cfg->led_mode; + + priv->led.name = kasprintf(GFP_KERNEL, "%s-led", + wiphy_name(priv->hw->wiphy)); + priv->led.brightness_set = iwl_led_brightness_set; + priv->led.blink_set = iwl_led_blink_set; + priv->led.max_brightness = 1; + + switch (mode) { + case IWL_LED_DEFAULT: + WARN_ON(1); + break; + case IWL_LED_BLINK: + priv->led.default_trigger = + ieee80211_create_tpt_led_trigger(priv->hw, + IEEE80211_TPT_LEDTRIG_FL_CONNECTED, + iwl_blink, ARRAY_SIZE(iwl_blink)); + break; + case IWL_LED_RF_STATE: + priv->led.default_trigger = + ieee80211_get_radio_led_name(priv->hw); + break; } - if (!priv->allow_blinking) { - priv->last_blink_time = 0; - if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) { - priv->last_blink_rate = IWL_SOLID_BLINK_IDX; - iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX); - } + ret = led_classdev_register(&priv->pci_dev->dev, &priv->led); + if (ret) { + kfree(priv->led.name); return; } - if (!priv->last_blink_time || - !time_after(jiffies, priv->last_blink_time + - msecs_to_jiffies(1000))) - return; - - blink_idx = iwl_get_blink_rate(priv); - - /* call only if blink rate change */ - if (blink_idx != priv->last_blink_rate) - iwl_led_pattern(priv, blink_idx); - priv->last_blink_time = jiffies; - priv->last_blink_rate = blink_idx; + priv->led_registered = true; } -EXPORT_SYMBOL(iwl_leds_background); -void iwl_leds_init(struct iwl_priv *priv) +void iwl_leds_exit(struct iwl_priv *priv) { - priv->last_blink_rate = 0; - priv->last_blink_time = 0; - priv->allow_blinking = 0; - if (led_mode != IWL_LED_DEFAULT && - led_mode != priv->cfg->led_mode) - priv->cfg->led_mode = led_mode; + if (!priv->led_registered) + return; + + led_classdev_unregister(&priv->led); + kfree(priv->led.name); } -EXPORT_SYMBOL(iwl_leds_init); diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h index 9079b33486ef..101eef12b3bb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.h +++ b/drivers/net/wireless/iwlwifi/iwl-led.h @@ -31,23 +31,14 @@ struct iwl_priv; #define IWL_LED_SOLID 11 -#define IWL_LED_NAME_LEN 31 #define IWL_DEF_LED_INTRVL cpu_to_le32(1000) #define IWL_LED_ACTIVITY (0<<1) #define IWL_LED_LINK (1<<1) -enum led_type { - IWL_LED_TRG_TX, - IWL_LED_TRG_RX, - IWL_LED_TRG_ASSOC, - IWL_LED_TRG_RADIO, - IWL_LED_TRG_MAX, -}; - /* * LED mode - * IWL_LED_DEFAULT: use system default + * IWL_LED_DEFAULT: use device default * IWL_LED_RF_STATE: turn LED on/off based on RF state * LED ON = RF ON * LED OFF = RF OFF @@ -60,9 +51,6 @@ enum iwl_led_mode { }; void 
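The driver-private blink table and background worker are replaced above by a led_classdev whose default trigger is mac80211's throughput trigger; each iwl_blink entry maps a throughput ceiling (assumed to be in kbit/s, per the ieee80211_tpt_blink convention) to a blink period in milliseconds, so lower throughput blinks more slowly. A sketch of how such a table lookup behaves, purely for illustration — mac80211's trigger does the real selection:

static int example_blink_time(int tpt_kbps)
{
	int i;

	/* walk from the fastest entry down; the first threshold we exceed wins */
	for (i = ARRAY_SIZE(iwl_blink) - 1; i >= 0; i--)
		if (tpt_kbps > iwl_blink[i].throughput)
			return iwl_blink[i].blink_time;

	return iwl_blink[0].blink_time;	/* fallback: slowest blink */
}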
iwl_leds_init(struct iwl_priv *priv); -void iwl_leds_background(struct iwl_priv *priv); -int iwl_led_start(struct iwl_priv *priv); -int iwl_led_associate(struct iwl_priv *priv); -int iwl_led_disassociate(struct iwl_priv *priv); +void iwl_leds_exit(struct iwl_priv *priv); #endif /* __iwl_leds_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c deleted file mode 100644 index bb1a742a98a0..000000000000 --- a/drivers/net/wireless/iwlwifi/iwl-legacy.c +++ /dev/null @@ -1,662 +0,0 @@ -/****************************************************************************** - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless <ilw@linux.intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - *****************************************************************************/ - -#include <linux/kernel.h> -#include <net/mac80211.h> - -#include "iwl-dev.h" -#include "iwl-core.h" -#include "iwl-helpers.h" -#include "iwl-legacy.h" - -static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) -{ - if (test_bit(STATUS_EXIT_PENDING, &priv->status)) - return; - - if (!ctx->is_active) - return; - - ctx->qos_data.def_qos_parm.qos_flags = 0; - - if (ctx->qos_data.qos_active) - ctx->qos_data.def_qos_parm.qos_flags |= - QOS_PARAM_FLG_UPDATE_EDCA_MSK; - - if (ctx->ht.enabled) - ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; - - IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", - ctx->qos_data.qos_active, - ctx->qos_data.def_qos_parm.qos_flags); - - iwl_send_cmd_pdu_async(priv, ctx->qos_cmd, - sizeof(struct iwl_qosparam_cmd), - &ctx->qos_data.def_qos_parm, NULL); -} - -/** - * iwl_legacy_mac_config - mac80211 config callback - */ -int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) -{ - struct iwl_priv *priv = hw->priv; - const struct iwl_channel_info *ch_info; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_channel *channel = conf->channel; - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - struct iwl_rxon_context *ctx; - unsigned long flags = 0; - int ret = 0; - u16 ch; - int scan_active = 0; - bool ht_changed[NUM_IWL_RXON_CTX] = {}; - - if (WARN_ON(!priv->cfg->ops->legacy)) - return -EOPNOTSUPP; - - mutex_lock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n", - channel->hw_value, changed); - - if (unlikely(!priv->cfg->mod_params->disable_hw_scan && - test_bit(STATUS_SCANNING, &priv->status))) { - scan_active = 1; - IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); - } - - if (changed & (IEEE80211_CONF_CHANGE_SMPS | - IEEE80211_CONF_CHANGE_CHANNEL)) { 
- /* mac80211 uses static for non-HT which is what we want */ - priv->current_ht_config.smps = conf->smps_mode; - - /* - * Recalculate chain counts. - * - * If monitor mode is enabled then mac80211 will - * set up the SM PS mode to OFF if an HT channel is - * configured. - */ - if (priv->cfg->ops->hcmd->set_rxon_chain) - for_each_context(priv, ctx) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - } - - /* during scanning mac80211 will delay channel setting until - * scan finish with changed = 0 - */ - if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) { - if (scan_active) - goto set_ch_out; - - ch = channel->hw_value; - ch_info = iwl_get_channel_info(priv, channel->band, ch); - if (!is_channel_valid(ch_info)) { - IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n"); - ret = -EINVAL; - goto set_ch_out; - } - - spin_lock_irqsave(&priv->lock, flags); - - for_each_context(priv, ctx) { - /* Configure HT40 channels */ - if (ctx->ht.enabled != conf_is_ht(conf)) { - ctx->ht.enabled = conf_is_ht(conf); - ht_changed[ctx->ctxid] = true; - } - if (ctx->ht.enabled) { - if (conf_is_ht40_minus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_BELOW; - ctx->ht.is_40mhz = true; - } else if (conf_is_ht40_plus(conf)) { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_ABOVE; - ctx->ht.is_40mhz = true; - } else { - ctx->ht.extension_chan_offset = - IEEE80211_HT_PARAM_CHA_SEC_NONE; - ctx->ht.is_40mhz = false; - } - } else - ctx->ht.is_40mhz = false; - - /* - * Default to no protection. Protection mode will - * later be set from BSS config in iwl_ht_conf - */ - ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; - - /* if we are switching from ht to 2.4 clear flags - * from any ht related info since 2.4 does not - * support ht */ - if ((le16_to_cpu(ctx->staging.channel) != ch)) - ctx->staging.flags = 0; - - iwl_set_rxon_channel(priv, channel, ctx); - iwl_set_rxon_ht(priv, ht_conf); - - iwl_set_flags_for_band(priv, ctx, channel->band, - ctx->vif); - } - - spin_unlock_irqrestore(&priv->lock, flags); - - if (priv->cfg->ops->legacy->update_bcast_stations) - ret = priv->cfg->ops->legacy->update_bcast_stations(priv); - - set_ch_out: - /* The list of supported rates and rate mask can be different - * for each band; since the band may have changed, reset - * the rate mask to what mac80211 lists */ - iwl_set_rate(priv); - } - - if (changed & (IEEE80211_CONF_CHANGE_PS | - IEEE80211_CONF_CHANGE_IDLE)) { - ret = iwl_power_update_mode(priv, false); - if (ret) - IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); - } - - if (changed & IEEE80211_CONF_CHANGE_POWER) { - IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n", - priv->tx_power_user_lmt, conf->power_level); - - iwl_set_tx_power(priv, conf->power_level, false); - } - - if (!iwl_is_ready(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); - goto out; - } - - if (scan_active) - goto out; - - for_each_context(priv, ctx) { - if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging))) - iwlcore_commit_rxon(priv, ctx); - else - IWL_DEBUG_INFO(priv, - "Not re-sending same RXON configuration.\n"); - if (ht_changed[ctx->ctxid]) - iwl_update_qos(priv, ctx); - } - -out: - IWL_DEBUG_MAC80211(priv, "leave\n"); - mutex_unlock(&priv->mutex); - return ret; -} -EXPORT_SYMBOL(iwl_legacy_mac_config); - -void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw) -{ - struct iwl_priv *priv = hw->priv; - unsigned long flags; - /* IBSS can only be the IWL_RXON_CTX_BSS context */ - struct iwl_rxon_context *ctx = 
&priv->contexts[IWL_RXON_CTX_BSS]; - - if (WARN_ON(!priv->cfg->ops->legacy)) - return; - - mutex_lock(&priv->mutex); - IWL_DEBUG_MAC80211(priv, "enter\n"); - - spin_lock_irqsave(&priv->lock, flags); - memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); - spin_unlock_irqrestore(&priv->lock, flags); - - spin_lock_irqsave(&priv->lock, flags); - - /* new association get rid of ibss beacon skb */ - if (priv->beacon_skb) - dev_kfree_skb(priv->beacon_skb); - - priv->beacon_skb = NULL; - - priv->timestamp = 0; - - spin_unlock_irqrestore(&priv->lock, flags); - - iwl_scan_cancel_timeout(priv, 100); - if (!iwl_is_ready_rf(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); - mutex_unlock(&priv->mutex); - return; - } - - /* we are restarting association process - * clear RXON_FILTER_ASSOC_MSK bit - */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - iwlcore_commit_rxon(priv, ctx); - - iwl_set_rate(priv); - - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} -EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf); - -static void iwl_ht_conf(struct iwl_priv *priv, - struct ieee80211_vif *vif) -{ - struct iwl_ht_config *ht_conf = &priv->current_ht_config; - struct ieee80211_sta *sta; - struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - IWL_DEBUG_ASSOC(priv, "enter:\n"); - - if (!ctx->ht.enabled) - return; - - ctx->ht.protection = - bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION; - ctx->ht.non_gf_sta_present = - !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); - - ht_conf->single_chain_sufficient = false; - - switch (vif->type) { - case NL80211_IFTYPE_STATION: - rcu_read_lock(); - sta = ieee80211_find_sta(vif, bss_conf->bssid); - if (sta) { - struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; - int maxstreams; - - maxstreams = (ht_cap->mcs.tx_params & - IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) - >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; - maxstreams += 1; - - if ((ht_cap->mcs.rx_mask[1] == 0) && - (ht_cap->mcs.rx_mask[2] == 0)) - ht_conf->single_chain_sufficient = true; - if (maxstreams <= 1) - ht_conf->single_chain_sufficient = true; - } else { - /* - * If at all, this can only happen through a race - * when the AP disconnects us while we're still - * setting up the connection, in that case mac80211 - * will soon tell us about that. 
- */ - ht_conf->single_chain_sufficient = true; - } - rcu_read_unlock(); - break; - case NL80211_IFTYPE_ADHOC: - ht_conf->single_chain_sufficient = true; - break; - default: - break; - } - - IWL_DEBUG_ASSOC(priv, "leave\n"); -} - -static inline void iwl_set_no_assoc(struct iwl_priv *priv, - struct ieee80211_vif *vif) -{ - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - - iwl_led_disassociate(priv); - /* - * inform the ucode that there is no longer an - * association and that no more packets should be - * sent - */ - ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; - ctx->staging.assoc_id = 0; - iwlcore_commit_rxon(priv, ctx); -} - -static void iwlcore_beacon_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_priv *priv = hw->priv; - unsigned long flags; - __le64 timestamp; - struct sk_buff *skb = ieee80211_beacon_get(hw, vif); - - if (!skb) - return; - - IWL_DEBUG_MAC80211(priv, "enter\n"); - - lockdep_assert_held(&priv->mutex); - - if (!priv->beacon_ctx) { - IWL_ERR(priv, "update beacon but no beacon context!\n"); - dev_kfree_skb(skb); - return; - } - - spin_lock_irqsave(&priv->lock, flags); - - if (priv->beacon_skb) - dev_kfree_skb(priv->beacon_skb); - - priv->beacon_skb = skb; - - timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; - priv->timestamp = le64_to_cpu(timestamp); - - IWL_DEBUG_MAC80211(priv, "leave\n"); - spin_unlock_irqrestore(&priv->lock, flags); - - if (!iwl_is_ready_rf(priv)) { - IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); - return; - } - - priv->cfg->ops->legacy->post_associate(priv); -} - -void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf, - u32 changes) -{ - struct iwl_priv *priv = hw->priv; - struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); - int ret; - - if (WARN_ON(!priv->cfg->ops->legacy)) - return; - - IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes); - - if (!iwl_is_alive(priv)) - return; - - mutex_lock(&priv->mutex); - - if (changes & BSS_CHANGED_QOS) { - unsigned long flags; - - spin_lock_irqsave(&priv->lock, flags); - ctx->qos_data.qos_active = bss_conf->qos; - iwl_update_qos(priv, ctx); - spin_unlock_irqrestore(&priv->lock, flags); - } - - if (changes & BSS_CHANGED_BEACON_ENABLED) { - /* - * the add_interface code must make sure we only ever - * have a single interface that could be beaconing at - * any time. - */ - if (vif->bss_conf.enable_beacon) - priv->beacon_ctx = ctx; - else - priv->beacon_ctx = NULL; - } - - if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) { - dev_kfree_skb(priv->beacon_skb); - priv->beacon_skb = ieee80211_beacon_get(hw, vif); - } - - if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP) - iwl_send_rxon_timing(priv, ctx); - - if (changes & BSS_CHANGED_BSSID) { - IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid); - - /* - * If there is currently a HW scan going on in the - * background then we need to cancel it else the RXON - * below/in post_associate will fail. 
- */ - if (iwl_scan_cancel_timeout(priv, 100)) { - IWL_WARN(priv, "Aborted scan still in progress after 100ms\n"); - IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n"); - mutex_unlock(&priv->mutex); - return; - } - - /* mac80211 only sets assoc when in STATION mode */ - if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) { - memcpy(ctx->staging.bssid_addr, - bss_conf->bssid, ETH_ALEN); - - /* currently needed in a few places */ - memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); - } else { - ctx->staging.filter_flags &= - ~RXON_FILTER_ASSOC_MSK; - } - - } - - /* - * This needs to be after setting the BSSID in case - * mac80211 decides to do both changes at once because - * it will invoke post_associate. - */ - if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON) - iwlcore_beacon_update(hw, vif); - - if (changes & BSS_CHANGED_ERP_PREAMBLE) { - IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n", - bss_conf->use_short_preamble); - if (bss_conf->use_short_preamble) - ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; - else - ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; - } - - if (changes & BSS_CHANGED_ERP_CTS_PROT) { - IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot); - if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ)) - ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; - else - ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; - if (bss_conf->use_cts_prot) - ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; - else - ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; - } - - if (changes & BSS_CHANGED_BASIC_RATES) { - /* XXX use this information - * - * To do that, remove code from iwl_set_rate() and put something - * like this here: - * - if (A-band) - ctx->staging.ofdm_basic_rates = - bss_conf->basic_rates; - else - ctx->staging.ofdm_basic_rates = - bss_conf->basic_rates >> 4; - ctx->staging.cck_basic_rates = - bss_conf->basic_rates & 0xF; - */ - } - - if (changes & BSS_CHANGED_HT) { - iwl_ht_conf(priv, vif); - - if (priv->cfg->ops->hcmd->set_rxon_chain) - priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); - } - - if (changes & BSS_CHANGED_ASSOC) { - IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); - if (bss_conf->assoc) { - priv->timestamp = bss_conf->timestamp; - - iwl_led_associate(priv); - - if (!iwl_is_rfkill(priv)) - priv->cfg->ops->legacy->post_associate(priv); - } else - iwl_set_no_assoc(priv, vif); - } - - if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) { - IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n", - changes); - ret = iwl_send_rxon_assoc(priv, ctx); - if (!ret) { - /* Sync active_rxon with latest change. */ - memcpy((void *)&ctx->active, - &ctx->staging, - sizeof(struct iwl_rxon_cmd)); - } - } - - if (changes & BSS_CHANGED_BEACON_ENABLED) { - if (vif->bss_conf.enable_beacon) { - memcpy(ctx->staging.bssid_addr, - bss_conf->bssid, ETH_ALEN); - memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); - iwl_led_associate(priv); - priv->cfg->ops->legacy->config_ap(priv); - } else - iwl_set_no_assoc(priv, vif); - } - - if (changes & BSS_CHANGED_IBSS) { - ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif, - bss_conf->ibss_joined); - if (ret) - IWL_ERR(priv, "failed to %s IBSS station %pM\n", - bss_conf->ibss_joined ? 
"add" : "remove", - bss_conf->bssid); - } - - mutex_unlock(&priv->mutex); - - IWL_DEBUG_MAC80211(priv, "leave\n"); -} -EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed); - -irqreturn_t iwl_isr_legacy(int irq, void *data) -{ - struct iwl_priv *priv = data; - u32 inta, inta_mask; - u32 inta_fh; - unsigned long flags; - if (!priv) - return IRQ_NONE; - - spin_lock_irqsave(&priv->lock, flags); - - /* Disable (but don't clear!) interrupts here to avoid - * back-to-back ISRs and sporadic interrupts from our NIC. - * If we have something to service, the tasklet will re-enable ints. - * If we *don't* have something, we'll re-enable before leaving here. */ - inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ - iwl_write32(priv, CSR_INT_MASK, 0x00000000); - - /* Discover which interrupts are active/pending */ - inta = iwl_read32(priv, CSR_INT); - inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); - - /* Ignore interrupt if there's nothing in NIC to service. - * This may be due to IRQ shared with another device, - * or due to sporadic interrupts thrown from our NIC. */ - if (!inta && !inta_fh) { - IWL_DEBUG_ISR(priv, - "Ignore interrupt, inta == 0, inta_fh == 0\n"); - goto none; - } - - if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { - /* Hardware disappeared. It might have already raised - * an interrupt */ - IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta); - goto unplugged; - } - - IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", - inta, inta_mask, inta_fh); - - inta &= ~CSR_INT_BIT_SCD; - - /* iwl_irq_tasklet() will service interrupts and re-enable them */ - if (likely(inta || inta_fh)) - tasklet_schedule(&priv->irq_tasklet); - -unplugged: - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_HANDLED; - -none: - /* re-enable interrupts here since we don't have anything to service. */ - /* only Re-enable if disabled by irq */ - if (test_bit(STATUS_INT_ENABLED, &priv->status)) - iwl_enable_interrupts(priv); - spin_unlock_irqrestore(&priv->lock, flags); - return IRQ_NONE; -} -EXPORT_SYMBOL(iwl_isr_legacy); - -/* - * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this - * function. 
- */ -void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv, - struct ieee80211_tx_info *info, - __le16 fc, __le32 *tx_flags) -{ - if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { - *tx_flags |= TX_CMD_FLG_RTS_MSK; - *tx_flags &= ~TX_CMD_FLG_CTS_MSK; - *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; - - if (!ieee80211_is_mgmt(fc)) - return; - - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_AUTH): - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): - *tx_flags &= ~TX_CMD_FLG_RTS_MSK; - *tx_flags |= TX_CMD_FLG_CTS_MSK; - break; - } - } else if (info->control.rates[0].flags & - IEEE80211_TX_RC_USE_CTS_PROTECT) { - *tx_flags &= ~TX_CMD_FLG_RTS_MSK; - *tx_flags |= TX_CMD_FLG_CTS_MSK; - *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; - } -} -EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection); diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c index 1eec18d909d8..576795e2c75b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-power.c +++ b/drivers/net/wireless/iwlwifi/iwl-power.c @@ -226,8 +226,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv, else cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (iwl_advanced_bt_coexist(priv)) { if (!priv->cfg->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else @@ -313,8 +312,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv, else cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; - if (priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist) { + if (iwl_advanced_bt_coexist(priv)) { if (!priv->cfg->bt_params->bt_sco_disable) cmd->flags |= IWL_POWER_BT_SCO_ENA; else @@ -358,8 +356,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv, if (priv->cfg->base_params->broken_powersave) iwl_power_sleep_cam_cmd(priv, cmd); - else if (priv->cfg->base_params->supports_idle && - priv->hw->conf.flags & IEEE80211_CONF_IDLE) + else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE) iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); else if (priv->cfg->ops->lib->tt_ops.lower_power_detection && priv->cfg->ops->lib->tt_ops.tt_power_mode && @@ -428,7 +425,6 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, return ret; } -EXPORT_SYMBOL(iwl_power_set_mode); int iwl_power_update_mode(struct iwl_priv *priv, bool force) { @@ -437,7 +433,6 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force) iwl_power_build_cmd(priv, &cmd); return iwl_power_set_mode(priv, &cmd, force); } -EXPORT_SYMBOL(iwl_power_update_mode); /* initialize to default */ void iwl_power_initialize(struct iwl_priv *priv) @@ -451,4 +446,3 @@ void iwl_power_initialize(struct iwl_priv *priv) memset(&priv->power_data.sleep_cmd, 0, sizeof(priv->power_data.sleep_cmd)); } -EXPORT_SYMBOL(iwl_power_initialize); diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 87a6fd84d4d2..6f9a2fa04763 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c @@ -29,6 +29,7 @@ #include <linux/etherdevice.h> #include <linux/slab.h> +#include <linux/sched.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include "iwl-eeprom.h" @@ -37,7 +38,15 @@ #include "iwl-sta.h" #include "iwl-io.h" #include "iwl-helpers.h" -/************************** RX-FUNCTIONS ****************************/ +#include "iwl-agn-calib.h" +#include 
"iwl-agn.h" + +/****************************************************************************** + * + * RX path functions + * + ******************************************************************************/ + /* * Rx theory of operation * @@ -118,7 +127,6 @@ int iwl_rx_queue_space(const struct iwl_rx_queue *q) s = 0; return s; } -EXPORT_SYMBOL(iwl_rx_queue_space); /** * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue @@ -170,7 +178,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q exit_unlock: spin_unlock_irqrestore(&q->lock, flags); } -EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); int iwl_rx_queue_alloc(struct iwl_priv *priv) { @@ -211,10 +218,105 @@ err_rb: err_bd: return -ENOMEM; } -EXPORT_SYMBOL(iwl_rx_queue_alloc); + +/****************************************************************************** + * + * Generic RX handler implementations + * + ******************************************************************************/ + +static void iwl_rx_reply_alive(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_alive_resp *palive; + struct delayed_work *pwork; + + palive = &pkt->u.alive_frame; + + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " + "0x%01X 0x%01X\n", + palive->is_valid, palive->ver_type, + palive->ver_subtype); + + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); + memcpy(&priv->card_alive_init, + &pkt->u.alive_frame, + sizeof(struct iwl_init_alive_resp)); + pwork = &priv->init_alive_start; + } else { + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); + memcpy(&priv->card_alive, &pkt->u.alive_frame, + sizeof(struct iwl_alive_resp)); + pwork = &priv->alive_start; + } + + /* We delay the ALIVE response by 5ms to + * give the HW RF Kill time to activate... */ + if (palive->is_valid == UCODE_VALID_OK) + queue_delayed_work(priv->workqueue, pwork, + msecs_to_jiffies(5)); + else { + IWL_WARN(priv, "%s uCode did not respond OK.\n", + (palive->ver_subtype == INITIALIZE_SUBTYPE) ? + "init" : "runtime"); + /* + * If fail to load init uCode, + * let's try to load the init uCode again. + * We should not get into this situation, but if it + * does happen, we should not move on and loading "runtime" + * without proper calibrate the device. + */ + if (palive->ver_subtype == INITIALIZE_SUBTYPE) + priv->ucode_type = UCODE_NONE; + queue_work(priv->workqueue, &priv->restart); + } +} + +static void iwl_rx_reply_error(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " + "seq 0x%04X ser 0x%08X\n", + le32_to_cpu(pkt->u.err_resp.error_type), + get_cmd_string(pkt->u.err_resp.cmd_id), + pkt->u.err_resp.cmd_id, + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), + le32_to_cpu(pkt->u.err_resp.error_info)); +} + +static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); + /* + * MULTI-FIXME + * See iwl_mac_channel_switch. 
+ */ + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; + struct iwl_rxon_cmd *rxon = (void *)&ctx->active; + + if (priv->switch_rxon.switch_in_progress) { + if (!le32_to_cpu(csa->status) && + (csa->channel == priv->switch_rxon.channel)) { + rxon->channel = csa->channel; + ctx->staging.channel = csa->channel; + IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", + le16_to_cpu(csa->channel)); + iwl_chswitch_done(priv, true); + } else { + IWL_ERR(priv, "CSA notif (fail) : channel %d\n", + le16_to_cpu(csa->channel)); + iwl_chswitch_done(priv, false); + } + } +} -void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, +static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); @@ -229,48 +331,494 @@ void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, memcpy(&priv->measure_report, report, sizeof(*report)); priv->measurement_status |= MEASUREMENT_READY; } -EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif); -void iwl_recover_from_statistics(struct iwl_priv *priv, - struct iwl_rx_packet *pkt) +static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ +#ifdef CONFIG_IWLWIFI_DEBUG + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); + IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", + sleep->pm_sleep_mode, sleep->pm_wakeup_src); +#endif +} + +static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " + "notification for %s:\n", len, + get_cmd_string(pkt->hdr.cmd)); + iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); +} + +static void iwl_rx_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw; +#ifdef CONFIG_IWLWIFI_DEBUG + u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status); + u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); + + IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " + "tsf:0x%.8x%.8x rate:%d\n", + status & TX_STATUS_MSK, + beacon->beacon_notify_hdr.failure_frame, + le32_to_cpu(beacon->ibss_mgr_status), + le32_to_cpu(beacon->high_tsf), + le32_to_cpu(beacon->low_tsf), rate); +#endif + + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); + + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) + queue_work(priv->workqueue, &priv->beacon_update); +} + +/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ +#define ACK_CNT_RATIO (50) +#define BA_TIMEOUT_CNT (5) +#define BA_TIMEOUT_MAX (16) + +/** + * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries. + * + * When the ACK count ratio is low and aggregated BA timeout retries exceeding + * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal + * operation state. 
+ */ +static bool iwl_good_ack_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt) +{ + int actual_delta, expected_delta, ba_timeout_delta; + struct statistics_tx *cur, *old; + + if (priv->_agn.agg_tids_count) + return true; + + if (iwl_bt_statistics(priv)) { + cur = &pkt->u.stats_bt.tx; + old = &priv->_agn.statistics_bt.tx; + } else { + cur = &pkt->u.stats.tx; + old = &priv->_agn.statistics.tx; + } + + actual_delta = le32_to_cpu(cur->actual_ack_cnt) - + le32_to_cpu(old->actual_ack_cnt); + expected_delta = le32_to_cpu(cur->expected_ack_cnt) - + le32_to_cpu(old->expected_ack_cnt); + + /* Values should not be negative, but we do not trust the firmware */ + if (actual_delta <= 0 || expected_delta <= 0) + return true; + + ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) - + le32_to_cpu(old->agg.ba_timeout); + + if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO && + ba_timeout_delta > BA_TIMEOUT_CNT) { + IWL_DEBUG_RADIO(priv, "deltas: actual %d expected %d ba_timeout %d\n", + actual_delta, expected_delta, ba_timeout_delta); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + /* + * This is ifdef'ed on DEBUGFS because otherwise the + * statistics aren't available. If DEBUGFS is set but + * DEBUG is not, these will just compile out. + */ + IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n", + priv->_agn.delta_statistics.tx.rx_detected_cnt); + IWL_DEBUG_RADIO(priv, + "ack_or_ba_timeout_collision delta %d\n", + priv->_agn.delta_statistics.tx.ack_or_ba_timeout_collision); +#endif + + if (ba_timeout_delta >= BA_TIMEOUT_MAX) + return false; + } + + return true; +} + +/** + * iwl_good_plcp_health - checks for plcp error. + * + * When the plcp error is exceeding the thresholds, reset the radio + * to improve the throughput. + */ +static bool iwl_good_plcp_health(struct iwl_priv *priv, + struct iwl_rx_packet *pkt, unsigned int msecs) { + int delta; + int threshold = priv->cfg->base_params->plcp_delta_threshold; + + if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { + IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); + return true; + } + + if (iwl_bt_statistics(priv)) { + struct statistics_rx_bt *cur, *old; + + cur = &pkt->u.stats_bt.rx; + old = &priv->_agn.statistics_bt.rx; + + delta = le32_to_cpu(cur->ofdm.plcp_err) - + le32_to_cpu(old->ofdm.plcp_err) + + le32_to_cpu(cur->ofdm_ht.plcp_err) - + le32_to_cpu(old->ofdm_ht.plcp_err); + } else { + struct statistics_rx *cur, *old; + + cur = &pkt->u.stats.rx; + old = &priv->_agn.statistics.rx; + + delta = le32_to_cpu(cur->ofdm.plcp_err) - + le32_to_cpu(old->ofdm.plcp_err) + + le32_to_cpu(cur->ofdm_ht.plcp_err) - + le32_to_cpu(old->ofdm_ht.plcp_err); + } + + /* Can be negative if firmware reseted statistics */ + if (delta <= 0) + return true; + + if ((delta * 100 / msecs) > threshold) { + IWL_DEBUG_RADIO(priv, + "plcp health threshold %u delta %d msecs %u\n", + threshold, delta, msecs); + return false; + } + + return true; +} + +static void iwl_recover_from_statistics(struct iwl_priv *priv, + struct iwl_rx_packet *pkt) +{ + const struct iwl_mod_params *mod_params = priv->cfg->mod_params; + unsigned int msecs; + unsigned long stamp; + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return; - if (iwl_is_any_associated(priv)) { - if (priv->cfg->ops->lib->check_ack_health) { - if (!priv->cfg->ops->lib->check_ack_health( - priv, pkt)) { - /* - * low ack count detected - * restart Firmware - */ - IWL_ERR(priv, "low ack count detected, " - "restart firmware\n"); - if (!iwl_force_reset(priv, IWL_FW_RESET, false)) - return; - } + + stamp = jiffies; + msecs = 
jiffies_to_msecs(stamp - priv->rx_statistics_jiffies); + + /* Only gather statistics and update time stamp when not associated */ + if (!iwl_is_any_associated(priv)) + goto out; + + /* Do not check/recover when do not have enough statistics data */ + if (msecs < 99) + return; + + if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) { + IWL_ERR(priv, "low ack count detected, restart firmware\n"); + if (!iwl_force_reset(priv, IWL_FW_RESET, false)) + return; + } + + if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs)) + iwl_force_reset(priv, IWL_RF_RESET, false); + +out: + if (iwl_bt_statistics(priv)) + memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt, + sizeof(priv->_agn.statistics_bt)); + else + memcpy(&priv->_agn.statistics, &pkt->u.stats, + sizeof(priv->_agn.statistics)); + + priv->rx_statistics_jiffies = stamp; +} + +/* Calculate noise level, based on measurements during network silence just + * before arriving beacon. This measurement can be done only if we know + * exactly when to expect beacons, therefore only when we're associated. */ +static void iwl_rx_calc_noise(struct iwl_priv *priv) +{ + struct statistics_rx_non_phy *rx_info; + int num_active_rx = 0; + int total_silence = 0; + int bcn_silence_a, bcn_silence_b, bcn_silence_c; + int last_rx_noise; + + if (iwl_bt_statistics(priv)) + rx_info = &(priv->_agn.statistics_bt.rx.general.common); + else + rx_info = &(priv->_agn.statistics.rx.general); + bcn_silence_a = + le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER; + bcn_silence_b = + le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; + bcn_silence_c = + le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; + + if (bcn_silence_a) { + total_silence += bcn_silence_a; + num_active_rx++; + } + if (bcn_silence_b) { + total_silence += bcn_silence_b; + num_active_rx++; + } + if (bcn_silence_c) { + total_silence += bcn_silence_c; + num_active_rx++; + } + + /* Average among active antennas */ + if (num_active_rx) + last_rx_noise = (total_silence / num_active_rx) - 107; + else + last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; + + IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", + bcn_silence_a, bcn_silence_b, bcn_silence_c, + last_rx_noise); +} + +/* + * based on the assumption of all statistics counter are in DWORD + * FIXME: This function is for debugging, do not deal with + * the case of counters roll-over. 
+ */ +static void iwl_accumulative_statistics(struct iwl_priv *priv, + __le32 *stats) +{ +#ifdef CONFIG_IWLWIFI_DEBUGFS + int i, size; + __le32 *prev_stats; + u32 *accum_stats; + u32 *delta, *max_delta; + struct statistics_general_common *general, *accum_general; + struct statistics_tx *tx, *accum_tx; + + if (iwl_bt_statistics(priv)) { + prev_stats = (__le32 *)&priv->_agn.statistics_bt; + accum_stats = (u32 *)&priv->_agn.accum_statistics_bt; + size = sizeof(struct iwl_bt_notif_statistics); + general = &priv->_agn.statistics_bt.general.common; + accum_general = &priv->_agn.accum_statistics_bt.general.common; + tx = &priv->_agn.statistics_bt.tx; + accum_tx = &priv->_agn.accum_statistics_bt.tx; + delta = (u32 *)&priv->_agn.delta_statistics_bt; + max_delta = (u32 *)&priv->_agn.max_delta_bt; + } else { + prev_stats = (__le32 *)&priv->_agn.statistics; + accum_stats = (u32 *)&priv->_agn.accum_statistics; + size = sizeof(struct iwl_notif_statistics); + general = &priv->_agn.statistics.general.common; + accum_general = &priv->_agn.accum_statistics.general.common; + tx = &priv->_agn.statistics.tx; + accum_tx = &priv->_agn.accum_statistics.tx; + delta = (u32 *)&priv->_agn.delta_statistics; + max_delta = (u32 *)&priv->_agn.max_delta; + } + for (i = sizeof(__le32); i < size; + i += sizeof(__le32), stats++, prev_stats++, delta++, + max_delta++, accum_stats++) { + if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) { + *delta = (le32_to_cpu(*stats) - + le32_to_cpu(*prev_stats)); + *accum_stats += *delta; + if (*delta > *max_delta) + *max_delta = *delta; } - if (priv->cfg->ops->lib->check_plcp_health) { - if (!priv->cfg->ops->lib->check_plcp_health( - priv, pkt)) { - /* - * high plcp error detected - * reset Radio - */ - iwl_force_reset(priv, IWL_RF_RESET, false); - } + } + + /* reset accumulative statistics for "no-counter" type statistics */ + accum_general->temperature = general->temperature; + accum_general->temperature_m = general->temperature_m; + accum_general->ttl_timestamp = general->ttl_timestamp; + accum_tx->tx_power.ant_a = tx->tx_power.ant_a; + accum_tx->tx_power.ant_b = tx->tx_power.ant_b; + accum_tx->tx_power.ant_c = tx->tx_power.ant_c; +#endif +} + +static void iwl_rx_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + const int reg_recalib_period = 60; + int change; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + if (iwl_bt_statistics(priv)) { + IWL_DEBUG_RX(priv, + "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_bt_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & + FH_RSCSR_FRAME_SIZE_MSK); + + change = ((priv->_agn.statistics_bt.general.common.temperature != + pkt->u.stats_bt.general.common.temperature) || + ((priv->_agn.statistics_bt.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats_bt.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK))); + + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt); + } else { + IWL_DEBUG_RX(priv, + "Statistics notification received (%d vs %d).\n", + (int)sizeof(struct iwl_notif_statistics), + le32_to_cpu(pkt->len_n_flags) & + FH_RSCSR_FRAME_SIZE_MSK); + + change = ((priv->_agn.statistics.general.common.temperature != + pkt->u.stats.general.common.temperature) || + ((priv->_agn.statistics.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK) != + (pkt->u.stats.flag & + STATISTICS_REPLY_FLG_HT40_MODE_MSK))); + + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); + } + + iwl_recover_from_statistics(priv, pkt); + + set_bit(STATUS_STATISTICS, &priv->status); + + /* Reschedule the statistics 
timer to occur in + * reg_recalib_period seconds to ensure we get a + * thermal update even if the uCode doesn't give + * us one */ + mod_timer(&priv->statistics_periodic, jiffies + + msecs_to_jiffies(reg_recalib_period * 1000)); + + if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && + (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { + iwl_rx_calc_noise(priv); + queue_work(priv->workqueue, &priv->run_time_calib_work); + } + if (priv->cfg->ops->lib->temp_ops.temperature && change) + priv->cfg->ops->lib->temp_ops.temperature(priv); +} + +static void iwl_rx_reply_statistics(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) { +#ifdef CONFIG_IWLWIFI_DEBUGFS + memset(&priv->_agn.accum_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_agn.delta_statistics, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_agn.max_delta, 0, + sizeof(struct iwl_notif_statistics)); + memset(&priv->_agn.accum_statistics_bt, 0, + sizeof(struct iwl_bt_notif_statistics)); + memset(&priv->_agn.delta_statistics_bt, 0, + sizeof(struct iwl_bt_notif_statistics)); + memset(&priv->_agn.max_delta_bt, 0, + sizeof(struct iwl_bt_notif_statistics)); +#endif + IWL_DEBUG_RX(priv, "Statistics have been cleared\n"); + } + iwl_rx_statistics(priv, rxb); +} + +/* Handle notification from uCode that card's power state is changing + * due to software, hardware, or critical temperature RFKILL */ +static void iwl_rx_card_state_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); + unsigned long status = priv->status; + + IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", + (flags & HW_CARD_DISABLED) ? "Kill" : "On", + (flags & SW_CARD_DISABLED) ? "Kill" : "On", + (flags & CT_CARD_DISABLED) ? 
+ "Reached" : "Not reached"); + + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | + CT_CARD_DISABLED)) { + + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + + iwl_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); + + if (!(flags & RXON_CARD_DISABLED)) { + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); + iwl_write_direct32(priv, HBUS_TARG_MBX_C, + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); } + if (flags & CT_CARD_DISABLED) + iwl_tt_enter_ct_kill(priv); + } + if (!(flags & CT_CARD_DISABLED)) + iwl_tt_exit_ct_kill(priv); + + if (flags & HW_CARD_DISABLED) + set_bit(STATUS_RF_KILL_HW, &priv->status); + else + clear_bit(STATUS_RF_KILL_HW, &priv->status); + + + if (!(flags & RXON_CARD_DISABLED)) + iwl_scan_cancel(priv); + + if ((test_bit(STATUS_RF_KILL_HW, &status) != + test_bit(STATUS_RF_KILL_HW, &priv->status))) + wiphy_rfkill_set_hw_state(priv->hw->wiphy, + test_bit(STATUS_RF_KILL_HW, &priv->status)); + else + wake_up_interruptible(&priv->wait_command_queue); +} + +static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) + +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_missed_beacon_notif *missed_beacon; + + missed_beacon = &pkt->u.missed_beacon; + if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) > + priv->missed_beacon_threshold) { + IWL_DEBUG_CALIB(priv, + "missed bcn cnsq %d totl %d rcd %d expctd %d\n", + le32_to_cpu(missed_beacon->consecutive_missed_beacons), + le32_to_cpu(missed_beacon->total_missed_becons), + le32_to_cpu(missed_beacon->num_recvd_beacons), + le32_to_cpu(missed_beacon->num_expected_beacons)); + if (!test_bit(STATUS_SCANNING, &priv->status)) + iwl_init_sensitivity(priv); } } -EXPORT_SYMBOL(iwl_recover_from_statistics); + +/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). + * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. 
*/ +static void iwl_rx_reply_rx_phy(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + priv->_agn.last_phy_res_valid = true; + memcpy(&priv->_agn.last_phy_res, pkt->u.raw, + sizeof(struct iwl_rx_phy_res)); +} /* * returns non-zero if packet should be dropped */ -int iwl_set_decrypted_flag(struct iwl_priv *priv, - struct ieee80211_hdr *hdr, - u32 decrypt_res, - struct ieee80211_rx_status *stats) +static int iwl_set_decrypted_flag(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u32 decrypt_res, + struct ieee80211_rx_status *stats) { u16 fc = le16_to_cpu(hdr->frame_control); @@ -315,4 +863,264 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv, } return 0; } -EXPORT_SYMBOL(iwl_set_decrypted_flag); + +static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, + struct ieee80211_hdr *hdr, + u16 len, + u32 ampdu_status, + struct iwl_rx_mem_buffer *rxb, + struct ieee80211_rx_status *stats) +{ + struct sk_buff *skb; + __le16 fc = hdr->frame_control; + + /* We only process data packets if the interface is open */ + if (unlikely(!priv->is_open)) { + IWL_DEBUG_DROP_LIMIT(priv, + "Dropping packet while interface is not open.\n"); + return; + } + + /* In case of HW accelerated crypto and bad decryption, drop */ + if (!priv->cfg->mod_params->sw_crypto && + iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) + return; + + skb = dev_alloc_skb(128); + if (!skb) { + IWL_ERR(priv, "dev_alloc_skb failed\n"); + return; + } + + skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); + + iwl_update_stats(priv, false, fc, len); + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); + + ieee80211_rx(priv->hw, skb); + priv->alloc_rxb_page--; + rxb->page = NULL; +} + +static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) +{ + u32 decrypt_out = 0; + + if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == + RX_RES_STATUS_STATION_FOUND) + decrypt_out |= (RX_RES_STATUS_STATION_FOUND | + RX_RES_STATUS_NO_STATION_INFO_MISMATCH); + + decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); + + /* packet was not encrypted */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_NONE) + return decrypt_out; + + /* packet was encrypted with unknown alg */ + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == + RX_RES_STATUS_SEC_TYPE_ERR) + return decrypt_out; + + /* decryption was not done in HW */ + if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != + RX_MPDU_RES_STATUS_DEC_DONE_MSK) + return decrypt_out; + + switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { + + case RX_RES_STATUS_SEC_TYPE_CCMP: + /* alg is CCM: check MIC only */ + if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) + /* Bad MIC */ + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + + break; + + case RX_RES_STATUS_SEC_TYPE_TKIP: + if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { + /* Bad TTAK */ + decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; + break; + } + /* fall through if TTAK OK */ + default: + if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; + else + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; + break; + } + + IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", + decrypt_in, decrypt_out); + + return decrypt_out; +} + +/* Called for REPLY_RX (legacy ABG frames), or + * REPLY_RX_MPDU_CMD (HT high-throughput N frames). 
*/ +static void iwl_rx_reply_rx(struct iwl_priv *priv, + struct iwl_rx_mem_buffer *rxb) +{ + struct ieee80211_hdr *header; + struct ieee80211_rx_status rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_phy_res *phy_res; + __le32 rx_pkt_status; + struct iwl_rx_mpdu_res_start *amsdu; + u32 len; + u32 ampdu_status; + u32 rate_n_flags; + + /** + * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. + * REPLY_RX: physical layer info is in this buffer + * REPLY_RX_MPDU_CMD: physical layer info was sent in separate + * command and cached in priv->last_phy_res + * + * Here we set up local variables depending on which command is + * received. + */ + if (pkt->hdr.cmd == REPLY_RX) { + phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt); + + len = le16_to_cpu(phy_res->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + + phy_res->cfg_phy_cnt + len); + ampdu_status = le32_to_cpu(rx_pkt_status); + } else { + if (!priv->_agn.last_phy_res_valid) { + IWL_ERR(priv, "MPDU frame without cached PHY data\n"); + return; + } + phy_res = &priv->_agn.last_phy_res; + amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); + len = le16_to_cpu(amsdu->byte_count); + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); + ampdu_status = iwl_translate_rx_status(priv, + le32_to_cpu(rx_pkt_status)); + } + + if ((unlikely(phy_res->cfg_phy_cnt > 20))) { + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", + phy_res->cfg_phy_cnt); + return; + } + + if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || + !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { + IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(rx_pkt_status)); + return; + } + + /* This will be used in several places later */ + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); + + /* rx_status carries information about the packet to mac80211 */ + rx_status.mactime = le64_to_cpu(phy_res->timestamp); + rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), + rx_status.band); + rx_status.rate_idx = + iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); + rx_status.flag = 0; + + /* TSF isn't reliable. In order to allow smooth user experience, + * this W/A doesn't propagate it to the mac80211 */ + /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ + + priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); + + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ + rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res); + + iwl_dbg_log_rx_data_frame(priv, len, header); + IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", + rx_status.signal, (unsigned long long)rx_status.mactime); + + /* + * "antenna number" + * + * It seems that the antenna field in the phy flags value + * is actually a bit field. This is undefined by radiotap, + * it wants an actual antenna number but I always get "7" + * for most legacy frames I receive indicating that the + * same frame was received on all three RX chains. + * + * I think this field should be removed in favor of a + * new 802.11n radiotap field "RX chains" that is defined + * as a bitmask. 
+ */ + rx_status.antenna = + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) + >> RX_RES_PHY_FLAGS_ANTENNA_POS; + + /* set the preamble flag if appropriate */ + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) + rx_status.flag |= RX_FLAG_SHORTPRE; + + /* Set up the HT phy flags */ + if (rate_n_flags & RATE_MCS_HT_MSK) + rx_status.flag |= RX_FLAG_HT; + if (rate_n_flags & RATE_MCS_HT40_MSK) + rx_status.flag |= RX_FLAG_40MHZ; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status.flag |= RX_FLAG_SHORT_GI; + + iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status, + rxb, &rx_status); +} + +/** + * iwl_setup_rx_handlers - Initialize Rx handler callbacks + * + * Setup the RX handlers for each of the reply types sent from the uCode + * to the host. + */ +void iwl_setup_rx_handlers(struct iwl_priv *priv) +{ + void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); + + handlers = priv->rx_handlers; + + handlers[REPLY_ALIVE] = iwl_rx_reply_alive; + handlers[REPLY_ERROR] = iwl_rx_reply_error; + handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; + handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif; + handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; + handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif; + handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; + + /* + * The same handler is used for both the REPLY to a discrete + * statistics request from the host as well as for the periodic + * statistics notifications (after received beacons) from the uCode. + */ + handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics; + handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; + + iwl_setup_rx_scan_handlers(priv); + + handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; + handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif; + + /* Rx handlers */ + handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; + handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; + + /* block ack */ + handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; + + /* Set up hardware specific Rx handlers */ + priv->cfg->ops->lib->rx_handler_setup(priv); +} diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 12d9363d0afe..3a4d9e6b0421 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -101,7 +101,7 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted) ieee80211_scan_completed(priv->hw, aborted); } - priv->is_internal_short_scan = false; + priv->scan_type = IWL_SCAN_NORMAL; priv->scan_vif = NULL; priv->scan_request = NULL; } @@ -155,7 +155,6 @@ int iwl_scan_cancel(struct iwl_priv *priv) queue_work(priv->workqueue, &priv->abort_scan); return 0; } -EXPORT_SYMBOL(iwl_scan_cancel); /** * iwl_scan_cancel_timeout - Cancel any currently executing HW scan @@ -180,7 +179,6 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) return test_bit(STATUS_SCAN_HW, &priv->status); } -EXPORT_SYMBOL(iwl_scan_cancel_timeout); /* Service response to REPLY_SCAN_CMD (0x80) */ static void iwl_rx_reply_scan(struct iwl_priv *priv, @@ -257,8 +255,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, queue_work(priv->workqueue, &priv->scan_completed); if (priv->iw_mode != NL80211_IFTYPE_ADHOC && - priv->cfg->bt_params && - priv->cfg->bt_params->advanced_bt_coexist && + iwl_advanced_bt_coexist(priv) && priv->bt_status != scan_notif->bt_status) { if (scan_notif->bt_status) { /* BT on */ @@ -289,7 +286,6 
@@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = iwl_rx_scan_complete_notif; } -EXPORT_SYMBOL(iwl_setup_rx_scan_handlers); inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, enum ieee80211_band band, @@ -302,7 +298,6 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, return IWL_ACTIVE_DWELL_TIME_24 + IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1); } -EXPORT_SYMBOL(iwl_get_active_dwell_time); u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, enum ieee80211_band band, @@ -334,7 +329,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, return passive; } -EXPORT_SYMBOL(iwl_get_passive_dwell_time); void iwl_init_scan_params(struct iwl_priv *priv) { @@ -344,12 +338,11 @@ void iwl_init_scan_params(struct iwl_priv *priv) if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; } -EXPORT_SYMBOL(iwl_init_scan_params); -static int __must_check iwl_scan_initiate(struct iwl_priv *priv, - struct ieee80211_vif *vif, - bool internal, - enum ieee80211_band band) +int __must_check iwl_scan_initiate(struct iwl_priv *priv, + struct ieee80211_vif *vif, + enum iwl_scan_type scan_type, + enum ieee80211_band band) { int ret; @@ -377,17 +370,19 @@ static int __must_check iwl_scan_initiate(struct iwl_priv *priv, } IWL_DEBUG_SCAN(priv, "Starting %sscan...\n", - internal ? "internal short " : ""); + scan_type == IWL_SCAN_NORMAL ? "" : + scan_type == IWL_SCAN_OFFCH_TX ? "offchan TX " : + "internal short "); set_bit(STATUS_SCANNING, &priv->status); - priv->is_internal_short_scan = internal; + priv->scan_type = scan_type; priv->scan_start = jiffies; priv->scan_band = band; ret = priv->cfg->ops->utils->request_scan(priv, vif); if (ret) { clear_bit(STATUS_SCANNING, &priv->status); - priv->is_internal_short_scan = false; + priv->scan_type = IWL_SCAN_NORMAL; return ret; } @@ -412,7 +407,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, mutex_lock(&priv->mutex); if (test_bit(STATUS_SCANNING, &priv->status) && - !priv->is_internal_short_scan) { + priv->scan_type != IWL_SCAN_NORMAL) { IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); ret = -EAGAIN; goto out_unlock; @@ -426,11 +421,11 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, * If an internal scan is in progress, just set * up the scan_request as per above. */ - if (priv->is_internal_short_scan) { + if (priv->scan_type != IWL_SCAN_NORMAL) { IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); ret = 0; } else - ret = iwl_scan_initiate(priv, vif, false, + ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, req->channels[0]->band); IWL_DEBUG_MAC80211(priv, "leave\n"); @@ -440,7 +435,6 @@ out_unlock: return ret; } -EXPORT_SYMBOL(iwl_mac_hw_scan); /* * internal short scan, this function should only been called while associated. 
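
The iwl-scan.c hunks above and below replace the boolean priv->is_internal_short_scan with a priv->scan_type field, so that a normal mac80211-requested scan, the internal radio-reset scan, and the off-channel TX scan handled further down in iwl_bg_scan_completed() can be told apart with one value. The enum itself is defined outside the hunks shown here; a plausible definition, reconstructed only from the IWL_SCAN_* constants these hunks use and not taken verbatim from the patch, would be:

/* Sketch only: reconstructed from the IWL_SCAN_* values used in this file. */
enum iwl_scan_type {
	IWL_SCAN_NORMAL,	/* scan requested through iwl_mac_hw_scan() */
	IWL_SCAN_RADIO_RESET,	/* internal short scan used to reset/retune the radio */
	IWL_SCAN_OFFCH_TX,	/* scan carrying an off-channel TX frame */
};

Since only one scan can be in flight at a time, a single enum field in struct iwl_priv is enough to replace the old flag; the call sites that used to test is_internal_short_scan now compare the scan type instead, as the following hunks show.
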
@@ -460,7 +454,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work) mutex_lock(&priv->mutex); - if (priv->is_internal_short_scan == true) { + if (priv->scan_type == IWL_SCAN_RADIO_RESET) { IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); goto unlock; } @@ -470,7 +464,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work) goto unlock; } - if (iwl_scan_initiate(priv, NULL, true, priv->band)) + if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band)) IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n"); unlock: mutex_unlock(&priv->mutex); @@ -537,7 +531,6 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, return (u16)len; } -EXPORT_SYMBOL(iwl_fill_probe_req); static void iwl_bg_abort_scan(struct work_struct *work) { @@ -558,8 +551,7 @@ static void iwl_bg_scan_completed(struct work_struct *work) container_of(work, struct iwl_priv, scan_completed); bool aborted; - IWL_DEBUG_SCAN(priv, "Completed %sscan.\n", - priv->is_internal_short_scan ? "internal short " : ""); + IWL_DEBUG_SCAN(priv, "Completed scan.\n"); cancel_delayed_work(&priv->scan_check); @@ -574,7 +566,13 @@ static void iwl_bg_scan_completed(struct work_struct *work) goto out_settings; } - if (priv->is_internal_short_scan && !aborted) { + if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->_agn.offchan_tx_skb) { + ieee80211_tx_status_irqsafe(priv->hw, + priv->_agn.offchan_tx_skb); + priv->_agn.offchan_tx_skb = NULL; + } + + if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) { int err; /* Check if mac80211 requested scan during our internal scan */ @@ -582,7 +580,7 @@ static void iwl_bg_scan_completed(struct work_struct *work) goto out_complete; /* If so request a new scan */ - err = iwl_scan_initiate(priv, priv->scan_vif, false, + err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL, priv->scan_request->channels[0]->band); if (err) { IWL_DEBUG_SCAN(priv, @@ -622,7 +620,6 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv) INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan); INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); } -EXPORT_SYMBOL(iwl_setup_scan_deferred_work); void iwl_cancel_scan_deferred_work(struct iwl_priv *priv) { @@ -636,4 +633,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv) mutex_unlock(&priv->mutex); } } -EXPORT_SYMBOL(iwl_cancel_scan_deferred_work); diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 49493d176515..bc90a12408a3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -169,7 +169,6 @@ int iwl_send_add_sta(struct iwl_priv *priv, return ret; } -EXPORT_SYMBOL(iwl_send_add_sta); static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index, struct ieee80211_sta *sta, @@ -316,7 +315,6 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, return sta_id; } -EXPORT_SYMBOL_GPL(iwl_prep_station); #define STA_WAIT_TIMEOUT (HZ/2) @@ -379,7 +377,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx, *sta_id_r = sta_id; return ret; } -EXPORT_SYMBOL(iwl_add_station_common); /** * iwl_sta_ucode_deactivate - deactivate ucode status for a station @@ -513,7 +510,6 @@ out_err: spin_unlock_irqrestore(&priv->sta_lock, flags); return -EINVAL; } -EXPORT_SYMBOL_GPL(iwl_remove_station); /** * iwl_clear_ucode_stations - clear ucode station table bits @@ -548,7 +544,6 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv, if (!cleared) 
IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n"); } -EXPORT_SYMBOL(iwl_clear_ucode_stations); /** * iwl_restore_stations() - Restore driver known stations to device @@ -625,7 +620,6 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx) else IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n"); } -EXPORT_SYMBOL(iwl_restore_stations); void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) { @@ -668,7 +662,6 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx) priv->stations[sta_id].sta.sta.addr, ret); iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); } -EXPORT_SYMBOL(iwl_reprogram_ap_sta); int iwl_get_free_ucode_key_index(struct iwl_priv *priv) { @@ -680,7 +673,6 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv) return WEP_INVALID_OFFSET; } -EXPORT_SYMBOL(iwl_get_free_ucode_key_index); void iwl_dealloc_bcast_stations(struct iwl_priv *priv) { @@ -700,7 +692,6 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv) } spin_unlock_irqrestore(&priv->sta_lock, flags); } -EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations); #ifdef CONFIG_IWLWIFI_DEBUG static void iwl_dump_lq_cmd(struct iwl_priv *priv, @@ -810,7 +801,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, } return ret; } -EXPORT_SYMBOL(iwl_send_lq_cmd); int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -832,4 +822,3 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw, mutex_unlock(&priv->mutex); return ret; } -EXPORT_SYMBOL(iwl_mac_sta_remove); diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 073b6ce6141c..277c9175dcf6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -84,7 +84,23 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) } txq->need_update = 0; } -EXPORT_SYMBOL(iwl_txq_update_write_ptr); + +/** + * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's + */ +void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id) +{ + struct iwl_tx_queue *txq = &priv->txq[txq_id]; + struct iwl_queue *q = &txq->q; + + if (q->n_bd == 0) + return; + + while (q->write_ptr != q->read_ptr) { + priv->cfg->ops->lib->txq_free_tfd(priv, txq); + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); + } +} /** * iwl_tx_queue_free - Deallocate DMA queue. @@ -97,17 +113,10 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr); void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) { struct iwl_tx_queue *txq = &priv->txq[txq_id]; - struct iwl_queue *q = &txq->q; struct device *dev = &priv->pci_dev->dev; int i; - if (q->n_bd == 0) - return; - - /* first, empty all BD's */ - for (; q->write_ptr != q->read_ptr; - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) - priv->cfg->ops->lib->txq_free_tfd(priv, txq); + iwl_tx_queue_unmap(priv, txq_id); /* De-alloc array of command/tx buffers */ for (i = 0; i < TFD_TX_CMD_SLOTS; i++) @@ -131,42 +140,35 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); } -EXPORT_SYMBOL(iwl_tx_queue_free); /** - * iwl_cmd_queue_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. 
+ * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue */ -void iwl_cmd_queue_free(struct iwl_priv *priv) +void iwl_cmd_queue_unmap(struct iwl_priv *priv) { struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; struct iwl_queue *q = &txq->q; - struct device *dev = &priv->pci_dev->dev; int i; bool huge = false; if (q->n_bd == 0) return; - for (; q->read_ptr != q->write_ptr; - q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { + while (q->read_ptr != q->write_ptr) { /* we have no way to tell if it is a huge cmd ATM */ i = get_cmd_index(q, q->read_ptr, 0); - if (txq->meta[i].flags & CMD_SIZE_HUGE) { + if (txq->meta[i].flags & CMD_SIZE_HUGE) huge = true; - continue; - } + else + pci_unmap_single(priv->pci_dev, + dma_unmap_addr(&txq->meta[i], mapping), + dma_unmap_len(&txq->meta[i], len), + PCI_DMA_BIDIRECTIONAL); - pci_unmap_single(priv->pci_dev, - dma_unmap_addr(&txq->meta[i], mapping), - dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); + q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); } + if (huge) { i = q->n_window; pci_unmap_single(priv->pci_dev, @@ -174,6 +176,23 @@ void iwl_cmd_queue_free(struct iwl_priv *priv) dma_unmap_len(&txq->meta[i], len), PCI_DMA_BIDIRECTIONAL); } +} + +/** + * iwl_cmd_queue_free - Deallocate DMA queue. + * @txq: Transmit queue to deallocate. + * + * Empty queue by removing and destroying all BD's. + * Free all buffers. + * 0-fill, but do not free "txq" descriptor structure. + */ +void iwl_cmd_queue_free(struct iwl_priv *priv) +{ + struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; + struct device *dev = &priv->pci_dev->dev; + int i; + + iwl_cmd_queue_unmap(priv); /* De-alloc array of command/tx buffers */ for (i = 0; i <= TFD_CMD_SLOTS; i++) @@ -193,7 +212,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv) /* 0-fill queue descriptor structure */ memset(txq, 0, sizeof(*txq)); } -EXPORT_SYMBOL(iwl_cmd_queue_free); /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** * DMA services @@ -233,7 +251,6 @@ int iwl_queue_space(const struct iwl_queue *q) s = 0; return s; } -EXPORT_SYMBOL(iwl_queue_space); /** @@ -384,7 +401,6 @@ out_free_arrays: return -ENOMEM; } -EXPORT_SYMBOL(iwl_tx_queue_init); void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, int slots_num, u32 txq_id) @@ -404,7 +420,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, /* Tell device where to find queue */ priv->cfg->ops->lib->txq_init(priv, txq); } -EXPORT_SYMBOL(iwl_tx_queue_reset); /*************** HOST COMMAND QUEUE FUNCTIONS *****/ @@ -641,4 +656,3 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) } meta->flags = 0; } -EXPORT_SYMBOL(iwl_tx_cmd_complete); diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c index 5a4982271e96..ed57e4402800 100644 --- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c +++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c @@ -287,7 +287,8 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm) return -EINVAL; } - freq = ieee80211_channel_to_frequency(umac_bss->channel); + freq = ieee80211_channel_to_frequency(umac_bss->channel, + band->band); channel = ieee80211_get_channel(wiphy, freq); signal = umac_bss->rssi * 100; diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index a944893ae3ca..9a57cf6a488f 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -543,7 +543,10 @@ static int 
iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf, switch (le32_to_cpu(complete->status)) { case UMAC_ASSOC_COMPLETE_SUCCESS: chan = ieee80211_get_channel(wiphy, - ieee80211_channel_to_frequency(complete->channel)); + ieee80211_channel_to_frequency(complete->channel, + complete->band == UMAC_BAND_2GHZ ? + IEEE80211_BAND_2GHZ : + IEEE80211_BAND_5GHZ)); if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) { /* Associated to a unallowed channel, disassociate. */ __iwm_invalidate_mlme_profile(iwm); @@ -841,7 +844,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, goto err; } - freq = ieee80211_channel_to_frequency(umac_bss->channel); + freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band); channel = ieee80211_get_channel(wiphy, freq); signal = umac_bss->rssi * 100; diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 698a1f7694ed..30ef0351bfc4 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -607,7 +607,8 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, /* No channel, no luck */ if (chan_no != -1) { struct wiphy *wiphy = priv->wdev->wiphy; - int freq = ieee80211_channel_to_frequency(chan_no); + int freq = ieee80211_channel_to_frequency(chan_no, + IEEE80211_BAND_2GHZ); struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); @@ -1597,7 +1598,8 @@ static int lbs_get_survey(struct wiphy *wiphy, struct net_device *dev, lbs_deb_enter(LBS_DEB_CFG80211); survey->channel = ieee80211_get_channel(wiphy, - ieee80211_channel_to_frequency(priv->channel)); + ieee80211_channel_to_frequency(priv->channel, + IEEE80211_BAND_2GHZ)); ret = lbs_get_rssi(priv, &signal, &noise); if (ret == 0) { diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 78c4da150a74..7e8a658b7670 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -145,9 +145,13 @@ int lbs_update_hw_spec(struct lbs_private *priv) if (priv->current_addr[0] == 0xff) memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN); - memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN); - if (priv->mesh_dev) - memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN); + if (!priv->copied_hwaddr) { + memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN); + if (priv->mesh_dev) + memcpy(priv->mesh_dev->dev_addr, + priv->current_addr, ETH_ALEN); + priv->copied_hwaddr = 1; + } out: lbs_deb_leave(LBS_DEB_CMD); diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 18dd9a02c459..bc461eb39660 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -90,6 +90,7 @@ struct lbs_private { void *card; u8 fw_ready; u8 surpriseremoved; + u8 setup_fw_on_resume; int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); void (*reset_card) (struct lbs_private *priv); int (*enter_deep_sleep) (struct lbs_private *priv); @@ -101,6 +102,7 @@ struct lbs_private { u32 fwcapinfo; u16 regioncode; u8 current_addr[ETH_ALEN]; + u8 copied_hwaddr; /* Command download */ u8 dnld_sent; diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h index 5eac1351a021..6cb6935ee4a3 100644 --- a/drivers/net/wireless/libertas/host.h +++ b/drivers/net/wireless/libertas/host.h @@ -387,7 +387,7 @@ struct lbs_offset_value { struct mrvl_ie_domain_param_set { struct mrvl_ie_header header; - u8 country_code[3]; + u8 
country_code[IEEE80211_COUNTRY_STRING_LEN]; struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS]; } __packed; diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index 00600239a053..f6c2cd665f49 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -20,10 +20,8 @@ #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/jiffies.h> -#include <linux/kthread.h> #include <linux/list.h> #include <linux/netdevice.h> -#include <linux/semaphore.h> #include <linux/slab.h> #include <linux/spi/libertas_spi.h> #include <linux/spi/spi.h> @@ -34,6 +32,12 @@ #include "dev.h" #include "if_spi.h" +struct if_spi_packet { + struct list_head list; + u16 blen; + u8 buffer[0] __attribute__((aligned(4))); +}; + struct if_spi_card { struct spi_device *spi; struct lbs_private *priv; @@ -51,18 +55,36 @@ struct if_spi_card { unsigned long spu_reg_delay; /* Handles all SPI communication (except for FW load) */ - struct task_struct *spi_thread; - int run_thread; - - /* Used to wake up the spi_thread */ - struct semaphore spi_ready; - struct semaphore spi_thread_terminated; + struct workqueue_struct *workqueue; + struct work_struct packet_work; u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE]; + + /* A buffer of incoming packets from libertas core. + * Since we can't sleep in hw_host_to_card, we have to buffer + * them. */ + struct list_head cmd_packet_list; + struct list_head data_packet_list; + + /* Protects cmd_packet_list and data_packet_list */ + spinlock_t buffer_lock; }; static void free_if_spi_card(struct if_spi_card *card) { + struct list_head *cursor, *next; + struct if_spi_packet *packet; + + list_for_each_safe(cursor, next, &card->cmd_packet_list) { + packet = container_of(cursor, struct if_spi_packet, list); + list_del(&packet->list); + kfree(packet); + } + list_for_each_safe(cursor, next, &card->data_packet_list) { + packet = container_of(cursor, struct if_spi_packet, list); + list_del(&packet->list); + kfree(packet); + } spi_set_drvdata(card->spi, NULL); kfree(card); } @@ -622,7 +644,7 @@ out: /* * SPI Transfer Thread * - * The SPI thread handles all SPI transfers, so there is no need for a lock. + * The SPI worker handles all SPI transfers, so there is no need for a lock. */ /* Move a command from the card to the host */ @@ -742,6 +764,40 @@ out: return err; } +/* Move data or a command from the host to the card. 
*/ +static void if_spi_h2c(struct if_spi_card *card, + struct if_spi_packet *packet, int type) +{ + int err = 0; + u16 int_type, port_reg; + + switch (type) { + case MVMS_DAT: + int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER; + port_reg = IF_SPI_DATA_RDWRPORT_REG; + break; + case MVMS_CMD: + int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER; + port_reg = IF_SPI_CMD_RDWRPORT_REG; + break; + default: + lbs_pr_err("can't transfer buffer of type %d\n", type); + err = -EINVAL; + goto out; + } + + /* Write the data to the card */ + err = spu_write(card, port_reg, packet->buffer, packet->blen); + if (err) + goto out; + +out: + kfree(packet); + + if (err) + lbs_pr_err("%s: error %d\n", __func__, err); +} + /* Inform the host about a card event */ static void if_spi_e2h(struct if_spi_card *card) { @@ -766,71 +822,88 @@ out: lbs_pr_err("%s: error %d\n", __func__, err); } -static int lbs_spi_thread(void *data) +static void if_spi_host_to_card_worker(struct work_struct *work) { int err; - struct if_spi_card *card = data; + struct if_spi_card *card; u16 hiStatus; + unsigned long flags; + struct if_spi_packet *packet; - while (1) { - /* Wait to be woken up by one of two things. First, our ISR - * could tell us that something happened on the WLAN. - * Secondly, libertas could call hw_host_to_card with more - * data, which we might be able to send. - */ - do { - err = down_interruptible(&card->spi_ready); - if (!card->run_thread) { - up(&card->spi_thread_terminated); - do_exit(0); - } - } while (err == -EINTR); + card = container_of(work, struct if_spi_card, packet_work); - /* Read the host interrupt status register to see what we - * can do. */ - err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG, - &hiStatus); - if (err) { - lbs_pr_err("I/O error\n"); + lbs_deb_enter(LBS_DEB_SPI); + + /* Read the host interrupt status register to see what we + * can do. */ + err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG, + &hiStatus); + if (err) { + lbs_pr_err("I/O error\n"); + goto err; + } + + if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) { + err = if_spi_c2h_cmd(card); + if (err) goto err; - } + } + if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) { + err = if_spi_c2h_data(card); + if (err) + goto err; + } - if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) { - err = if_spi_c2h_cmd(card); - if (err) - goto err; - } - if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) { - err = if_spi_c2h_data(card); - if (err) - goto err; + /* workaround: in PS mode, the card does not set the Command + * Download Ready bit, but it sets TX Download Ready. */ + if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || + (card->priv->psstate != PS_STATE_FULL_POWER && + (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { + /* This means two things. First of all, + * if there was a previous command sent, the card has + * successfully received it. + * Secondly, it is now ready to download another + * command. + */ + lbs_host_to_card_done(card->priv); + + /* Do we have any command packets from the host to + * send? */ + packet = NULL; + spin_lock_irqsave(&card->buffer_lock, flags); + if (!list_empty(&card->cmd_packet_list)) { + packet = (struct if_spi_packet *)(card-> + cmd_packet_list.next); + list_del(&packet->list); } + spin_unlock_irqrestore(&card->buffer_lock, flags); - /* workaround: in PS mode, the card does not set the Command - * Download Ready bit, but it sets TX Download Ready. 
*/ - if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || - (card->priv->psstate != PS_STATE_FULL_POWER && - (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { - lbs_host_to_card_done(card->priv); + if (packet) + if_spi_h2c(card, packet, MVMS_CMD); + } + if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) { + /* Do we have any data packets from the host to + * send? */ + packet = NULL; + spin_lock_irqsave(&card->buffer_lock, flags); + if (!list_empty(&card->data_packet_list)) { + packet = (struct if_spi_packet *)(card-> + data_packet_list.next); + list_del(&packet->list); } + spin_unlock_irqrestore(&card->buffer_lock, flags); - if (hiStatus & IF_SPI_HIST_CARD_EVENT) - if_spi_e2h(card); + if (packet) + if_spi_h2c(card, packet, MVMS_DAT); + } + if (hiStatus & IF_SPI_HIST_CARD_EVENT) + if_spi_e2h(card); err: - if (err) - lbs_pr_err("%s: got error %d\n", __func__, err); - } -} + if (err) + lbs_pr_err("%s: got error %d\n", __func__, err); -/* Block until lbs_spi_thread thread has terminated */ -static void if_spi_terminate_spi_thread(struct if_spi_card *card) -{ - /* It would be nice to use kthread_stop here, but that function - * can't wake threads waiting for a semaphore. */ - card->run_thread = 0; - up(&card->spi_ready); - down(&card->spi_thread_terminated); + lbs_deb_leave(LBS_DEB_SPI); } /* @@ -842,18 +915,40 @@ static int if_spi_host_to_card(struct lbs_private *priv, u8 type, u8 *buf, u16 nb) { int err = 0; + unsigned long flags; struct if_spi_card *card = priv->card; + struct if_spi_packet *packet; + u16 blen; lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb); - nb = ALIGN(nb, 4); + if (nb == 0) { + lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb); + err = -EINVAL; + goto out; + } + blen = ALIGN(nb, 4); + packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC); + if (!packet) { + err = -ENOMEM; + goto out; + } + packet->blen = blen; + memcpy(packet->buffer, buf, nb); + memset(packet->buffer + nb, 0, blen - nb); switch (type) { case MVMS_CMD: - err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb); + priv->dnld_sent = DNLD_CMD_SENT; + spin_lock_irqsave(&card->buffer_lock, flags); + list_add_tail(&packet->list, &card->cmd_packet_list); + spin_unlock_irqrestore(&card->buffer_lock, flags); break; case MVMS_DAT: - err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb); + priv->dnld_sent = DNLD_DATA_SENT; + spin_lock_irqsave(&card->buffer_lock, flags); + list_add_tail(&packet->list, &card->data_packet_list); + spin_unlock_irqrestore(&card->buffer_lock, flags); break; default: lbs_pr_err("can't transfer buffer of type %d", type); @@ -861,6 +956,9 @@ static int if_spi_host_to_card(struct lbs_private *priv, break; } + /* Queue spi xfer work */ + queue_work(card->workqueue, &card->packet_work); +out: lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err); return err; } @@ -869,13 +967,14 @@ static int if_spi_host_to_card(struct lbs_private *priv, * Host Interrupts * * Service incoming interrupts from the WLAN device. We can't sleep here, so - * don't try to talk on the SPI bus, just wake up the SPI thread. + * don't try to talk on the SPI bus, just queue the SPI xfer work. 
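The if_spi rework above buffers outgoing frames instead of writing them to the bus directly: hw_host_to_card() runs in a context that must not sleep, so it only copies the frame onto a spinlock-protected list and kicks the workqueue, and the worker later performs the actual SPI transfers. A rough user-space model of that split (illustrative names, not part of the patch):

/*
 * Sketch only: enqueue from a context that must not block,
 * drain later from a worker that may sleep on the bus.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct packet {
	struct packet *next;
	size_t len;
	unsigned char buf[64];
};

static struct packet *head, *tail;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called where blocking is not allowed: copy the frame and queue it. */
static int enqueue(const void *data, size_t len)
{
	struct packet *p;

	if (len > sizeof(p->buf))
		return -1;
	p = calloc(1, sizeof(*p));
	if (!p)
		return -1;
	memcpy(p->buf, data, len);
	p->len = len;

	pthread_mutex_lock(&list_lock);
	if (tail)
		tail->next = p;
	else
		head = p;
	tail = p;
	pthread_mutex_unlock(&list_lock);
	/* the driver would queue_work() here to kick the worker */
	return 0;
}

/* Worker context: pop packets one by one and push them out. */
static void drain(void)
{
	struct packet *p;

	for (;;) {
		pthread_mutex_lock(&list_lock);
		p = head;
		if (p && !(head = p->next))
			tail = NULL;
		pthread_mutex_unlock(&list_lock);
		if (!p)
			return;
		printf("write %zu bytes to the bus\n", p->len);
		free(p);
	}
}

int main(void)
{
	enqueue("cmd", 3);
	enqueue("data", 4);
	drain();
	return 0;
}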
*/ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id) { struct if_spi_card *card = dev_id; - up(&card->spi_ready); + queue_work(card->workqueue, &card->packet_work); + return IRQ_HANDLED; } @@ -883,56 +982,26 @@ static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id) * SPI callbacks */ -static int __devinit if_spi_probe(struct spi_device *spi) +static int if_spi_init_card(struct if_spi_card *card) { - struct if_spi_card *card; - struct lbs_private *priv = NULL; - struct libertas_spi_platform_data *pdata = spi->dev.platform_data; - int err = 0, i; + struct spi_device *spi = card->spi; + int err, i; u32 scratch; - struct sched_param param = { .sched_priority = 1 }; const struct firmware *helper = NULL; const struct firmware *mainfw = NULL; lbs_deb_enter(LBS_DEB_SPI); - if (!pdata) { - err = -EINVAL; - goto out; - } - - if (pdata->setup) { - err = pdata->setup(spi); - if (err) - goto out; - } - - /* Allocate card structure to represent this specific device */ - card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL); - if (!card) { - err = -ENOMEM; - goto out; - } - spi_set_drvdata(spi, card); - card->pdata = pdata; - card->spi = spi; - card->prev_xfer_time = jiffies; - - sema_init(&card->spi_ready, 0); - sema_init(&card->spi_thread_terminated, 0); - - /* Initialize the SPI Interface Unit */ - err = spu_init(card, pdata->use_dummy_writes); + err = spu_init(card, card->pdata->use_dummy_writes); if (err) - goto free_card; + goto out; err = spu_get_chip_revision(card, &card->card_id, &card->card_rev); if (err) - goto free_card; + goto out; - /* Firmware load */ err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch); if (err) - goto free_card; + goto out; if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC) lbs_deb_spi("Firmware is already loaded for " "Marvell WLAN 802.11 adapter\n"); @@ -946,7 +1015,7 @@ static int __devinit if_spi_probe(struct spi_device *spi) lbs_pr_err("Unsupported chip_id: 0x%02x\n", card->card_id); err = -ENODEV; - goto free_card; + goto out; } err = lbs_get_firmware(&card->spi->dev, NULL, NULL, @@ -954,7 +1023,7 @@ static int __devinit if_spi_probe(struct spi_device *spi) &mainfw); if (err) { lbs_pr_err("failed to find firmware (%d)\n", err); - goto free_card; + goto out; } lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter " @@ -966,15 +1035,68 @@ static int __devinit if_spi_probe(struct spi_device *spi) spi->max_speed_hz); err = if_spi_prog_helper_firmware(card, helper); if (err) - goto free_card; + goto out; err = if_spi_prog_main_firmware(card, mainfw); if (err) - goto free_card; + goto out; lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n"); } err = spu_set_interrupt_mode(card, 0, 1); if (err) + goto out; + +out: + if (helper) + release_firmware(helper); + if (mainfw) + release_firmware(mainfw); + + lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); + + return err; +} + +static int __devinit if_spi_probe(struct spi_device *spi) +{ + struct if_spi_card *card; + struct lbs_private *priv = NULL; + struct libertas_spi_platform_data *pdata = spi->dev.platform_data; + int err = 0; + + lbs_deb_enter(LBS_DEB_SPI); + + if (!pdata) { + err = -EINVAL; + goto out; + } + + if (pdata->setup) { + err = pdata->setup(spi); + if (err) + goto out; + } + + /* Allocate card structure to represent this specific device */ + card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL); + if (!card) { + err = -ENOMEM; + goto teardown; + } + spi_set_drvdata(spi, card); + card->pdata = pdata; + card->spi = spi; + card->prev_xfer_time = jiffies; + + 
INIT_LIST_HEAD(&card->cmd_packet_list); + INIT_LIST_HEAD(&card->data_packet_list); + spin_lock_init(&card->buffer_lock); + + /* Initialize the SPI Interface Unit */ + + /* Firmware load */ + err = if_spi_init_card(card); + if (err) goto free_card; /* Register our card with libertas. @@ -993,27 +1115,16 @@ static int __devinit if_spi_probe(struct spi_device *spi) priv->fw_ready = 1; /* Initialize interrupt handling stuff. */ - card->run_thread = 1; - card->spi_thread = kthread_run(lbs_spi_thread, card, "lbs_spi_thread"); - if (IS_ERR(card->spi_thread)) { - card->run_thread = 0; - err = PTR_ERR(card->spi_thread); - lbs_pr_err("error creating SPI thread: err=%d\n", err); - goto remove_card; - } - if (sched_setscheduler(card->spi_thread, SCHED_FIFO, ¶m)) - lbs_pr_err("Error setting scheduler, using default.\n"); + card->workqueue = create_workqueue("libertas_spi"); + INIT_WORK(&card->packet_work, if_spi_host_to_card_worker); err = request_irq(spi->irq, if_spi_host_interrupt, IRQF_TRIGGER_FALLING, "libertas_spi", card); if (err) { lbs_pr_err("can't get host irq line-- request_irq failed\n"); - goto terminate_thread; + goto terminate_workqueue; } - /* poke the IRQ handler so that we don't miss the first interrupt */ - up(&card->spi_ready); - /* Start the card. * This will call register_netdev, and we'll start * getting interrupts... */ @@ -1028,18 +1139,16 @@ static int __devinit if_spi_probe(struct spi_device *spi) release_irq: free_irq(spi->irq, card); -terminate_thread: - if_spi_terminate_spi_thread(card); -remove_card: +terminate_workqueue: + flush_workqueue(card->workqueue); + destroy_workqueue(card->workqueue); lbs_remove_card(priv); /* will call free_netdev */ free_card: free_if_spi_card(card); +teardown: + if (pdata->teardown) + pdata->teardown(spi); out: - if (helper) - release_firmware(helper); - if (mainfw) - release_firmware(mainfw); - lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); return err; } @@ -1056,7 +1165,8 @@ static int __devexit libertas_spi_remove(struct spi_device *spi) lbs_remove_card(priv); /* will call free_netdev */ free_irq(spi->irq, card); - if_spi_terminate_spi_thread(card); + flush_workqueue(card->workqueue); + destroy_workqueue(card->workqueue); if (card->pdata->teardown) card->pdata->teardown(spi); free_if_spi_card(card); diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 6836a6dd9853..ca8149cd5bd9 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -539,6 +539,43 @@ static int lbs_thread(void *data) return 0; } +/** + * @brief This function gets the HW spec from the firmware and sets + * some basic parameters. 
+ * + * @param priv A pointer to struct lbs_private structure + * @return 0 or -1 + */ +static int lbs_setup_firmware(struct lbs_private *priv) +{ + int ret = -1; + s16 curlevel = 0, minlevel = 0, maxlevel = 0; + + lbs_deb_enter(LBS_DEB_FW); + + /* Read MAC address from firmware */ + memset(priv->current_addr, 0xff, ETH_ALEN); + ret = lbs_update_hw_spec(priv); + if (ret) + goto done; + + /* Read power levels if available */ + ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel); + if (ret == 0) { + priv->txpower_cur = curlevel; + priv->txpower_min = minlevel; + priv->txpower_max = maxlevel; + } + + /* Send cmd to FW to enable 11D function */ + ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1); + + lbs_set_mac_control(priv); +done: + lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); + return ret; +} + int lbs_suspend(struct lbs_private *priv) { int ret; @@ -584,47 +621,13 @@ int lbs_resume(struct lbs_private *priv) lbs_pr_err("deep sleep activation failed: %d\n", ret); } - lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); - return ret; -} -EXPORT_SYMBOL_GPL(lbs_resume); - -/** - * @brief This function gets the HW spec from the firmware and sets - * some basic parameters. - * - * @param priv A pointer to struct lbs_private structure - * @return 0 or -1 - */ -static int lbs_setup_firmware(struct lbs_private *priv) -{ - int ret = -1; - s16 curlevel = 0, minlevel = 0, maxlevel = 0; - - lbs_deb_enter(LBS_DEB_FW); - - /* Read MAC address from firmware */ - memset(priv->current_addr, 0xff, ETH_ALEN); - ret = lbs_update_hw_spec(priv); - if (ret) - goto done; - - /* Read power levels if available */ - ret = lbs_get_tx_power(priv, &curlevel, &minlevel, &maxlevel); - if (ret == 0) { - priv->txpower_cur = curlevel; - priv->txpower_min = minlevel; - priv->txpower_max = maxlevel; - } + if (priv->setup_fw_on_resume) + ret = lbs_setup_firmware(priv); - /* Send cmd to FW to enable 11D function */ - ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_11D_ENABLE, 1); - - lbs_set_mac_control(priv); -done: lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret); return ret; } +EXPORT_SYMBOL_GPL(lbs_resume); /** * This function handles the timeout of command sending. diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c index acf3bf63ee33..9d097b9c8005 100644 --- a/drivers/net/wireless/libertas/mesh.c +++ b/drivers/net/wireless/libertas/mesh.c @@ -918,7 +918,6 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr, char *buf) { struct mrvl_mesh_defaults defs; - int maxlen; int ret; ret = mesh_get_default_parameters(dev, &defs); @@ -931,13 +930,11 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr, defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN; } - /* SSID not null terminated: reserve room for \0 + \n */ - maxlen = defs.meshie.val.mesh_id_len + 2; - maxlen = (PAGE_SIZE > maxlen) ? 
maxlen : PAGE_SIZE; + memcpy(buf, defs.meshie.val.mesh_id, defs.meshie.val.mesh_id_len); + buf[defs.meshie.val.mesh_id_len] = '\n'; + buf[defs.meshie.val.mesh_id_len + 1] = '\0'; - defs.meshie.val.mesh_id[defs.meshie.val.mesh_id_len] = '\0'; - - return snprintf(buf, maxlen, "%s\n", defs.meshie.val.mesh_id); + return defs.meshie.val.mesh_id_len + 1; } /** diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c index 9278b3c8ee30..d4005081f1df 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c @@ -225,7 +225,7 @@ static void lbtf_free_adapter(struct lbtf_private *priv) lbtf_deb_leave(LBTF_DEB_MAIN); } -static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct lbtf_private *priv = hw->priv; @@ -236,7 +236,6 @@ static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) * there are no buffered multicast frames to send */ ieee80211_stop_queues(priv->hw); - return NETDEV_TX_OK; } static void lbtf_tx_work(struct work_struct *work) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 454f045ddff3..56f439d58013 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -541,7 +541,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, } -static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { bool ack; struct ieee80211_tx_info *txi; @@ -551,7 +551,7 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (skb->len < 10) { /* Should not happen; just a sanity check for addr1 use */ dev_kfree_skb(skb); - return NETDEV_TX_OK; + return; } ack = mac80211_hwsim_tx_frame(hw, skb); @@ -571,7 +571,6 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack) txi->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, skb); - return NETDEV_TX_OK; } @@ -943,7 +942,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) { switch (action) { case IEEE80211_AMPDU_TX_START: diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index 9ecf8407cb1b..36952274950e 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -232,6 +232,9 @@ struct mwl8k_priv { struct completion firmware_loading_complete; }; +#define MAX_WEP_KEY_LEN 13 +#define NUM_WEP_KEYS 4 + /* Per interface specific private data */ struct mwl8k_vif { struct list_head list; @@ -242,8 +245,21 @@ struct mwl8k_vif { /* Non AMPDU sequence number assigned by driver. */ u16 seqno; + + /* Saved WEP keys */ + struct { + u8 enabled; + u8 key[sizeof(struct ieee80211_key_conf) + MAX_WEP_KEY_LEN]; + } wep_key_conf[NUM_WEP_KEYS]; + + /* BSSID */ + u8 bssid[ETH_ALEN]; + + /* A flag to indicate is HW crypto is enabled for this bssid */ + bool is_hw_crypto_enabled; }; #define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv)) +#define IEEE80211_KEY_CONF(_u8) ((struct ieee80211_key_conf *)(_u8)) struct mwl8k_sta { /* Index into station database. Returned by UPDATE_STADB. 
*/ @@ -337,6 +353,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = { #define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 #define MWL8K_CMD_BSS_START 0x1100 /* per-vif */ #define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */ +#define MWL8K_CMD_UPDATE_ENCRYPTION 0x1122 /* per-vif */ #define MWL8K_CMD_UPDATE_STADB 0x1123 static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) @@ -375,6 +392,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize) MWL8K_CMDNAME(SET_RATEADAPT_MODE); MWL8K_CMDNAME(BSS_START); MWL8K_CMDNAME(SET_NEW_STN); + MWL8K_CMDNAME(UPDATE_ENCRYPTION); MWL8K_CMDNAME(UPDATE_STADB); default: snprintf(buf, bufsize, "0x%x", cmd); @@ -715,10 +733,12 @@ static inline void mwl8k_remove_dma_header(struct sk_buff *skb, __le16 qos) skb_pull(skb, sizeof(*tr) - hdrlen); } -static inline void mwl8k_add_dma_header(struct sk_buff *skb) +static void +mwl8k_add_dma_header(struct sk_buff *skb, int tail_pad) { struct ieee80211_hdr *wh; int hdrlen; + int reqd_hdrlen; struct mwl8k_dma_data *tr; /* @@ -730,11 +750,13 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb) wh = (struct ieee80211_hdr *)skb->data; hdrlen = ieee80211_hdrlen(wh->frame_control); - if (hdrlen != sizeof(*tr)) - skb_push(skb, sizeof(*tr) - hdrlen); + reqd_hdrlen = sizeof(*tr); + + if (hdrlen != reqd_hdrlen) + skb_push(skb, reqd_hdrlen - hdrlen); if (ieee80211_is_data_qos(wh->frame_control)) - hdrlen -= 2; + hdrlen -= IEEE80211_QOS_CTL_LEN; tr = (struct mwl8k_dma_data *)skb->data; if (wh != &tr->wh) @@ -747,9 +769,52 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb) * payload". That is, everything except for the 802.11 header. * This includes all crypto material including the MIC. */ - tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr)); + tr->fwlen = cpu_to_le16(skb->len - sizeof(*tr) + tail_pad); } +static void mwl8k_encapsulate_tx_frame(struct sk_buff *skb) +{ + struct ieee80211_hdr *wh; + struct ieee80211_tx_info *tx_info; + struct ieee80211_key_conf *key_conf; + int data_pad; + + wh = (struct ieee80211_hdr *)skb->data; + + tx_info = IEEE80211_SKB_CB(skb); + + key_conf = NULL; + if (ieee80211_is_data(wh->frame_control)) + key_conf = tx_info->control.hw_key; + + /* + * Make sure the packet header is in the DMA header format (4-address + * without QoS), the necessary crypto padding between the header and the + * payload has already been provided by mac80211, but it doesn't add tail + * padding when HW crypto is enabled. + * + * We have the following trailer padding requirements: + * - WEP: 4 trailer bytes (ICV) + * - TKIP: 12 trailer bytes (8 MIC + 4 ICV) + * - CCMP: 8 trailer bytes (MIC) + */ + data_pad = 0; + if (key_conf != NULL) { + switch (key_conf->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + data_pad = 4; + break; + case WLAN_CIPHER_SUITE_TKIP: + data_pad = 12; + break; + case WLAN_CIPHER_SUITE_CCMP: + data_pad = 8; + break; + } + } + mwl8k_add_dma_header(skb, data_pad); +} /* * Packet reception for 88w8366 AP firmware. 
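The mwl8k_encapsulate_tx_frame() hunk above reserves room for the trailer the hardware appends when it encrypts a frame, so fwlen covers bytes that mac80211 never placed in the skb. A small stand-alone illustration of the per-cipher trailer sizes (not part of the patch):

/* Sketch only: trailer room per cipher when hardware crypto is in use. */
#include <stdio.h>

enum cipher { CIPHER_NONE, CIPHER_WEP, CIPHER_TKIP, CIPHER_CCMP };

static int tail_pad(enum cipher c)
{
	switch (c) {
	case CIPHER_WEP:  return 4;	/* ICV */
	case CIPHER_TKIP: return 12;	/* 8-byte MIC + 4-byte ICV */
	case CIPHER_CCMP: return 8;	/* MIC */
	default:          return 0;
	}
}

int main(void)
{
	int payload = 1400;	/* bytes handed down by mac80211 */

	/* fwlen must account for the payload plus the hardware trailer */
	printf("fwlen = %d\n", payload + tail_pad(CIPHER_TKIP));
	return 0;
}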
@@ -778,6 +843,13 @@ struct mwl8k_rxd_8366_ap { #define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80 +/* 8366 AP rx_status bits */ +#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80 +#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF +#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02 +#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04 +#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08 + static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr) { struct mwl8k_rxd_8366_ap *rxd = _rxd; @@ -834,10 +906,16 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, } else { status->band = IEEE80211_BAND_2GHZ; } - status->freq = ieee80211_channel_to_frequency(rxd->channel); + status->freq = ieee80211_channel_to_frequency(rxd->channel, + status->band); *qos = rxd->qos_control; + if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) && + (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) && + (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR)) + status->flag |= RX_FLAG_MMIC_ERROR; + return le16_to_cpu(rxd->pkt_len); } @@ -876,6 +954,11 @@ struct mwl8k_rxd_sta { #define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001 #define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02 +#define MWL8K_STA_RX_CTRL_DECRYPT_ERROR 0x04 +/* ICV=0 or MIC=1 */ +#define MWL8K_STA_RX_CTRL_DEC_ERR_TYPE 0x08 +/* Key is uploaded only in failure case */ +#define MWL8K_STA_RX_CTRL_KEY_INDEX 0x30 static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr) { @@ -931,9 +1014,13 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status, } else { status->band = IEEE80211_BAND_2GHZ; } - status->freq = ieee80211_channel_to_frequency(rxd->channel); + status->freq = ieee80211_channel_to_frequency(rxd->channel, + status->band); *qos = rxd->qos_control; + if ((rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DECRYPT_ERROR) && + (rxd->rx_ctrl & MWL8K_STA_RX_CTRL_DEC_ERR_TYPE)) + status->flag |= RX_FLAG_MMIC_ERROR; return le16_to_cpu(rxd->pkt_len); } @@ -969,13 +1056,12 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index) } memset(rxq->rxd, 0, size); - rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); + rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL); if (rxq->buf == NULL) { wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n"); pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); return -ENOMEM; } - memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf)); for (i = 0; i < MWL8K_RX_DESCS; i++) { int desc_size; @@ -1092,9 +1178,25 @@ static inline void mwl8k_save_beacon(struct ieee80211_hw *hw, ieee80211_queue_work(hw, &priv->finalize_join_worker); } +static inline struct mwl8k_vif *mwl8k_find_vif_bss(struct list_head *vif_list, + u8 *bssid) +{ + struct mwl8k_vif *mwl8k_vif; + + list_for_each_entry(mwl8k_vif, + vif_list, list) { + if (memcmp(bssid, mwl8k_vif->bssid, + ETH_ALEN) == 0) + return mwl8k_vif; + } + + return NULL; +} + static int rxq_process(struct ieee80211_hw *hw, int index, int limit) { struct mwl8k_priv *priv = hw->priv; + struct mwl8k_vif *mwl8k_vif = NULL; struct mwl8k_rx_queue *rxq = priv->rxq + index; int processed; @@ -1104,6 +1206,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) void *rxd; int pkt_len; struct ieee80211_rx_status status; + struct ieee80211_hdr *wh; __le16 qos; skb = rxq->buf[rxq->head].skb; @@ -1130,8 +1233,7 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) rxq->rxd_count--; - skb_put(skb, pkt_len); - mwl8k_remove_dma_header(skb, qos); + wh 
= &((struct mwl8k_dma_data *)skb->data)->wh; /* * Check for a pending join operation. Save a @@ -1141,6 +1243,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit) if (mwl8k_capture_bssid(priv, (void *)skb->data)) mwl8k_save_beacon(hw, skb); + if (ieee80211_has_protected(wh->frame_control)) { + + /* Check if hw crypto has been enabled for + * this bss. If yes, set the status flags + * accordingly + */ + mwl8k_vif = mwl8k_find_vif_bss(&priv->vif_list, + wh->addr1); + + if (mwl8k_vif != NULL && + mwl8k_vif->is_hw_crypto_enabled == true) { + /* + * When MMIC ERROR is encountered + * by the firmware, payload is + * dropped and only 32 bytes of + * mwl8k Firmware header is sent + * to the host. + * + * We need to add four bytes of + * key information. In it + * MAC80211 expects keyidx set to + * 0 for triggering Counter + * Measure of MMIC failure. + */ + if (status.flag & RX_FLAG_MMIC_ERROR) { + struct mwl8k_dma_data *tr; + tr = (struct mwl8k_dma_data *)skb->data; + memset((void *)&(tr->data), 0, 4); + pkt_len += 4; + } + + if (!ieee80211_is_auth(wh->frame_control)) + status.flag |= RX_FLAG_IV_STRIPPED | + RX_FLAG_DECRYPTED | + RX_FLAG_MMIC_STRIPPED; + } + } + + skb_put(skb, pkt_len); + mwl8k_remove_dma_header(skb, qos); memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); ieee80211_rx_irqsafe(hw, skb); @@ -1204,13 +1346,12 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index) } memset(txq->txd, 0, size); - txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); + txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); if (txq->skb == NULL) { wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n"); pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); return -ENOMEM; } - memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb)); for (i = 0; i < MWL8K_TX_DESCS; i++) { struct mwl8k_tx_desc *tx_desc; @@ -1392,6 +1533,13 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force) info = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(info); + + /* Rate control is happening in the firmware. + * Ensure no tx rate is being reported. 
+ */ + info->status.rates[0].idx = -1; + info->status.rates[0].count = 1; + if (MWL8K_TXD_SUCCESS(status)) info->flags |= IEEE80211_TX_STAT_ACK; @@ -1423,7 +1571,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index) txq->txd = NULL; } -static int +static void mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) { struct mwl8k_priv *priv = hw->priv; @@ -1443,7 +1591,11 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) else qos = 0; - mwl8k_add_dma_header(skb); + if (priv->ap_fw) + mwl8k_encapsulate_tx_frame(skb); + else + mwl8k_add_dma_header(skb, 0); + wh = &((struct mwl8k_dma_data *)skb->data)->wh; tx_info = IEEE80211_SKB_CB(skb); @@ -1481,7 +1633,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) wiphy_debug(hw->wiphy, "failed to dma map skb, dropping TX frame.\n"); dev_kfree_skb(skb); - return NETDEV_TX_OK; + return; } spin_lock_bh(&priv->tx_lock); @@ -1518,8 +1670,6 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb) mwl8k_tx_start(priv); spin_unlock_bh(&priv->tx_lock); - - return NETDEV_TX_OK; } @@ -1974,8 +2124,18 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw) cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma); cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); - for (i = 0; i < MWL8K_TX_QUEUES; i++) - cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma); + + /* + * Mac80211 stack has Q0 as highest priority and Q3 as lowest in + * that order. Firmware has Q3 as highest priority and Q0 as lowest + * in that order. Map Q3 of mac80211 to Q0 of firmware so that the + * priority is interpreted the right way in firmware. + */ + for (i = 0; i < MWL8K_TX_QUEUES; i++) { + int j = MWL8K_TX_QUEUES - 1 - i; + cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[j].txd_dma); + } + cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON); @@ -3099,6 +3259,274 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw, } /* + * CMD_UPDATE_ENCRYPTION. 
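The SET_HW_SPEC change above (and the matching mwl8k_conf_tx() change further down) mirrors the queue index because mac80211 and the mwl8k firmware number their priorities in opposite directions. A trivial stand-alone illustration (not part of the patch):

/* Sketch only: mac80211 queue 0 is highest priority, firmware queue 3 is. */
#include <stdio.h>

#define NUM_TX_QUEUES 4

static int to_fw_queue(int mac80211_queue)
{
	return NUM_TX_QUEUES - 1 - mac80211_queue;
}

int main(void)
{
	int q;

	for (q = 0; q < NUM_TX_QUEUES; q++)
		printf("mac80211 q%d -> firmware q%d\n", q, to_fw_queue(q));
	return 0;
}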
+ */ + +#define MAX_ENCR_KEY_LENGTH 16 +#define MIC_KEY_LENGTH 8 + +struct mwl8k_cmd_update_encryption { + struct mwl8k_cmd_pkt header; + + __le32 action; + __le32 reserved; + __u8 mac_addr[6]; + __u8 encr_type; + +} __attribute__((packed)); + +struct mwl8k_cmd_set_key { + struct mwl8k_cmd_pkt header; + + __le32 action; + __le32 reserved; + __le16 length; + __le16 key_type_id; + __le32 key_info; + __le32 key_id; + __le16 key_len; + __u8 key_material[MAX_ENCR_KEY_LENGTH]; + __u8 tkip_tx_mic_key[MIC_KEY_LENGTH]; + __u8 tkip_rx_mic_key[MIC_KEY_LENGTH]; + __le16 tkip_rsc_low; + __le32 tkip_rsc_high; + __le16 tkip_tsc_low; + __le32 tkip_tsc_high; + __u8 mac_addr[6]; +} __attribute__((packed)); + +enum { + MWL8K_ENCR_ENABLE, + MWL8K_ENCR_SET_KEY, + MWL8K_ENCR_REMOVE_KEY, + MWL8K_ENCR_SET_GROUP_KEY, +}; + +#define MWL8K_UPDATE_ENCRYPTION_TYPE_WEP 0 +#define MWL8K_UPDATE_ENCRYPTION_TYPE_DISABLE 1 +#define MWL8K_UPDATE_ENCRYPTION_TYPE_TKIP 4 +#define MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED 7 +#define MWL8K_UPDATE_ENCRYPTION_TYPE_AES 8 + +enum { + MWL8K_ALG_WEP, + MWL8K_ALG_TKIP, + MWL8K_ALG_CCMP, +}; + +#define MWL8K_KEY_FLAG_TXGROUPKEY 0x00000004 +#define MWL8K_KEY_FLAG_PAIRWISE 0x00000008 +#define MWL8K_KEY_FLAG_TSC_VALID 0x00000040 +#define MWL8K_KEY_FLAG_WEP_TXKEY 0x01000000 +#define MWL8K_KEY_FLAG_MICKEY_VALID 0x02000000 + +static int mwl8k_cmd_update_encryption_enable(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u8 *addr, + u8 encr_type) +{ + struct mwl8k_cmd_update_encryption *cmd; + int rc; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->action = cpu_to_le32(MWL8K_ENCR_ENABLE); + memcpy(cmd->mac_addr, addr, ETH_ALEN); + cmd->encr_type = encr_type; + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); + kfree(cmd); + + return rc; +} + +static int mwl8k_encryption_set_cmd_info(struct mwl8k_cmd_set_key *cmd, + u8 *addr, + struct ieee80211_key_conf *key) +{ + cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_ENCRYPTION); + cmd->header.length = cpu_to_le16(sizeof(*cmd)); + cmd->length = cpu_to_le16(sizeof(*cmd) - + offsetof(struct mwl8k_cmd_set_key, length)); + cmd->key_id = cpu_to_le32(key->keyidx); + cmd->key_len = cpu_to_le16(key->keylen); + memcpy(cmd->mac_addr, addr, ETH_ALEN); + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + cmd->key_type_id = cpu_to_le16(MWL8K_ALG_WEP); + if (key->keyidx == 0) + cmd->key_info = cpu_to_le32(MWL8K_KEY_FLAG_WEP_TXKEY); + + break; + case WLAN_CIPHER_SUITE_TKIP: + cmd->key_type_id = cpu_to_le16(MWL8K_ALG_TKIP); + cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + ? cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE) + : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY); + cmd->key_info |= cpu_to_le32(MWL8K_KEY_FLAG_MICKEY_VALID + | MWL8K_KEY_FLAG_TSC_VALID); + break; + case WLAN_CIPHER_SUITE_CCMP: + cmd->key_type_id = cpu_to_le16(MWL8K_ALG_CCMP); + cmd->key_info = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + ? 
cpu_to_le32(MWL8K_KEY_FLAG_PAIRWISE) + : cpu_to_le32(MWL8K_KEY_FLAG_TXGROUPKEY); + break; + default: + return -ENOTSUPP; + } + + return 0; +} + +static int mwl8k_cmd_encryption_set_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u8 *addr, + struct ieee80211_key_conf *key) +{ + struct mwl8k_cmd_set_key *cmd; + int rc; + int keymlen; + u32 action; + u8 idx; + struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + rc = mwl8k_encryption_set_cmd_info(cmd, addr, key); + if (rc < 0) + goto done; + + idx = key->keyidx; + + if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) + action = MWL8K_ENCR_SET_KEY; + else + action = MWL8K_ENCR_SET_GROUP_KEY; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + if (!mwl8k_vif->wep_key_conf[idx].enabled) { + memcpy(mwl8k_vif->wep_key_conf[idx].key, key, + sizeof(*key) + key->keylen); + mwl8k_vif->wep_key_conf[idx].enabled = 1; + } + + keymlen = 0; + action = MWL8K_ENCR_SET_KEY; + break; + case WLAN_CIPHER_SUITE_TKIP: + keymlen = MAX_ENCR_KEY_LENGTH + 2 * MIC_KEY_LENGTH; + break; + case WLAN_CIPHER_SUITE_CCMP: + keymlen = key->keylen; + break; + default: + rc = -ENOTSUPP; + goto done; + } + + memcpy(cmd->key_material, key->key, keymlen); + cmd->action = cpu_to_le32(action); + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); +done: + kfree(cmd); + + return rc; +} + +static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u8 *addr, + struct ieee80211_key_conf *key) +{ + struct mwl8k_cmd_set_key *cmd; + int rc; + struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (cmd == NULL) + return -ENOMEM; + + rc = mwl8k_encryption_set_cmd_info(cmd, addr, key); + if (rc < 0) + goto done; + + if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || + WLAN_CIPHER_SUITE_WEP104) + mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0; + + cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY); + + rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header); +done: + kfree(cmd); + + return rc; +} + +static int mwl8k_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd_param, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + int rc = 0; + u8 encr_type; + u8 *addr; + struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); + + if (vif->type == NL80211_IFTYPE_STATION) + return -EOPNOTSUPP; + + if (sta == NULL) + addr = hw->wiphy->perm_addr; + else + addr = sta->addr; + + if (cmd_param == SET_KEY) { + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + rc = mwl8k_cmd_encryption_set_key(hw, vif, addr, key); + if (rc) + goto out; + + if ((key->cipher == WLAN_CIPHER_SUITE_WEP40) + || (key->cipher == WLAN_CIPHER_SUITE_WEP104)) + encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_WEP; + else + encr_type = MWL8K_UPDATE_ENCRYPTION_TYPE_MIXED; + + rc = mwl8k_cmd_update_encryption_enable(hw, vif, addr, + encr_type); + if (rc) + goto out; + + mwl8k_vif->is_hw_crypto_enabled = true; + + } else { + rc = mwl8k_cmd_encryption_remove_key(hw, vif, addr, key); + + if (rc) + goto out; + + mwl8k_vif->is_hw_crypto_enabled = false; + + } +out: + return rc; +} + +/* * CMD_UPDATE_STADB. */ struct ewc_ht_info { @@ -3310,22 +3738,19 @@ static void mwl8k_rx_poll(unsigned long data) /* * Core driver operations. 
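The new mwl8k_set_key() path caches WEP keys per key index in the vif so that mwl8k_sta_add() can re-install them for stations that associate later. A simplified user-space model of that cache (illustrative names, not part of the patch):

/* Sketch only: remember per-index WEP keys, replay them on station add. */
#include <stdio.h>
#include <string.h>

#define NUM_WEP_KEYS     4
#define MAX_WEP_KEY_LEN 13

struct wep_cache {
	int enabled;
	int len;
	unsigned char key[MAX_WEP_KEY_LEN];
};

static struct wep_cache cache[NUM_WEP_KEYS];

static void set_key(int idx, const unsigned char *key, int len)
{
	if (idx < 0 || idx >= NUM_WEP_KEYS || len > MAX_WEP_KEY_LEN)
		return;
	memcpy(cache[idx].key, key, len);
	cache[idx].len = len;
	cache[idx].enabled = 1;
	/* the driver would also push the key to the firmware here */
}

static void sta_add(void)
{
	int i;

	for (i = 0; i < NUM_WEP_KEYS; i++)
		if (cache[i].enabled)
			printf("re-install WEP key %d (%d bytes)\n",
			       i, cache[i].len);
}

int main(void)
{
	set_key(0, (const unsigned char *)"abcde", 5);
	sta_add();
	return 0;
}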
*/ -static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct mwl8k_priv *priv = hw->priv; int index = skb_get_queue_mapping(skb); - int rc; if (!priv->radio_on) { wiphy_debug(hw->wiphy, "dropped TX frame since radio disabled\n"); dev_kfree_skb(skb); - return NETDEV_TX_OK; + return; } - rc = mwl8k_txq_xmit(hw, index, skb); - - return rc; + mwl8k_txq_xmit(hw, index, skb); } static int mwl8k_start(struct ieee80211_hw *hw) @@ -3469,6 +3894,8 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw, mwl8k_vif->vif = vif; mwl8k_vif->macid = macid; mwl8k_vif->seqno = 0; + memcpy(mwl8k_vif->bssid, vif->addr, ETH_ALEN); + mwl8k_vif->is_hw_crypto_enabled = false; /* Set the mac address. */ mwl8k_cmd_set_mac_addr(hw, vif, vif->addr); @@ -3528,9 +3955,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed) if (rc) goto out; - rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7); - if (!rc) - rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); + rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x3); + if (rc) + wiphy_warn(hw->wiphy, "failed to set # of RX antennas"); + rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); + if (rc) + wiphy_warn(hw->wiphy, "failed to set # of TX antennas"); + } else { rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level); if (rc) @@ -3866,18 +4297,27 @@ static int mwl8k_sta_add(struct ieee80211_hw *hw, { struct mwl8k_priv *priv = hw->priv; int ret; + int i; + struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif); + struct ieee80211_key_conf *key; if (!priv->ap_fw) { ret = mwl8k_cmd_update_stadb_add(hw, vif, sta); if (ret >= 0) { MWL8K_STA(sta)->peer_id = ret; - return 0; + ret = 0; } - return ret; + } else { + ret = mwl8k_cmd_set_new_stn_add(hw, vif, sta); } - return mwl8k_cmd_set_new_stn_add(hw, vif, sta); + for (i = 0; i < NUM_WEP_KEYS; i++) { + key = IEEE80211_KEY_CONF(mwl8k_vif->wep_key_conf[i].key); + if (mwl8k_vif->wep_key_conf[i].enabled) + mwl8k_set_key(hw, SET_KEY, vif, sta, key); + } + return ret; } static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, @@ -3894,12 +4334,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue, if (!priv->wmm_enabled) rc = mwl8k_cmd_set_wmm_mode(hw, 1); - if (!rc) - rc = mwl8k_cmd_set_edca_params(hw, queue, + if (!rc) { + int q = MWL8K_TX_QUEUES - 1 - queue; + rc = mwl8k_cmd_set_edca_params(hw, q, params->cw_min, params->cw_max, params->aifs, params->txop); + } mwl8k_fw_unlock(hw); } @@ -3932,7 +4374,8 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx, static int mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) { switch (action) { case IEEE80211_AMPDU_RX_START: @@ -3955,6 +4398,7 @@ static const struct ieee80211_ops mwl8k_ops = { .bss_info_changed = mwl8k_bss_info_changed, .prepare_multicast = mwl8k_prepare_multicast, .configure_filter = mwl8k_configure_filter, + .set_key = mwl8k_set_key, .set_rts_threshold = mwl8k_set_rts_threshold, .sta_add = mwl8k_sta_add, .sta_remove = mwl8k_sta_remove, @@ -4332,7 +4776,7 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv) hw->queues = MWL8K_TX_QUEUES; /* Set rssi values to dBm */ - hw->flags |= IEEE80211_HW_SIGNAL_DBM; + hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_HAS_RATE_CONTROL; hw->vif_data_size = sizeof(struct mwl8k_vif); hw->sta_data_size = sizeof(struct 
mwl8k_sta); diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c index 86cb54c842e7..e99ca1c1e0d8 100644 --- a/drivers/net/wireless/orinoco/scan.c +++ b/drivers/net/wireless/orinoco/scan.c @@ -111,6 +111,11 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv, freq = ieee80211_dsss_chan_to_freq(le16_to_cpu(bss->a.channel)); channel = ieee80211_get_channel(wiphy, freq); + if (!channel) { + printk(KERN_DEBUG "Invalid channel designation %04X(%04X)", + bss->a.channel, freq); + return; /* Then ignore it for now */ + } timestamp = 0; capability = le16_to_cpu(bss->a.capabilities); beacon_interval = le16_to_cpu(bss->a.beacon_interv); diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig index 25f965ffc889..0ec55b50798e 100644 --- a/drivers/net/wireless/p54/Kconfig +++ b/drivers/net/wireless/p54/Kconfig @@ -43,9 +43,8 @@ config P54_SPI tristate "Prism54 SPI (stlc45xx) support" depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS ---help--- - This driver is for stlc4550 or stlc4560 based wireless chips. - This driver is experimental, untested and will probably only work on - Nokia's N800/N810 Portable Internet Tablet. + This driver is for stlc4550 or stlc4560 based wireless chips + such as Nokia's N800/N810 Portable Internet Tablet. If you choose to build a module, it'll be called p54spi. diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c index 35b09aa0529b..13d750da9301 100644 --- a/drivers/net/wireless/p54/eeprom.c +++ b/drivers/net/wireless/p54/eeprom.c @@ -55,6 +55,17 @@ static struct ieee80211_rate p54_arates[] = { { .bitrate = 540, .hw_value = 11, }, }; +static struct p54_rssi_db_entry p54_rssi_default = { + /* + * The defaults are taken from usb-logs of the + * vendor driver. So, they should be safe to + * use in case we can't get a match from the + * rssi <-> dBm conversion database. + */ + .mul = 130, + .add = -398, +}; + #define CHAN_HAS_CAL BIT(0) #define CHAN_HAS_LIMIT BIT(1) #define CHAN_HAS_CURVE BIT(2) @@ -87,13 +98,27 @@ static int p54_get_band_from_freq(u16 freq) return -1; } +static int same_band(u16 freq, u16 freq2) +{ + return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2); +} + static int p54_compare_channels(const void *_a, const void *_b) { const struct p54_channel_entry *a = _a; const struct p54_channel_entry *b = _b; - return a->index - b->index; + return a->freq - b->freq; +} + +static int p54_compare_rssichan(const void *_a, + const void *_b) +{ + const struct p54_rssi_db_entry *a = _a; + const struct p54_rssi_db_entry *b = _b; + + return a->freq - b->freq; } static int p54_fill_band_bitrates(struct ieee80211_hw *dev, @@ -145,25 +170,26 @@ static int p54_generate_band(struct ieee80211_hw *dev, for (i = 0, j = 0; (j < list->band_channel_num[band]) && (i < list->entries); i++) { + struct p54_channel_entry *chan = &list->channels[i]; - if (list->channels[i].band != band) + if (chan->band != band) continue; - if (list->channels[i].data != CHAN_HAS_ALL) { - wiphy_err(dev->wiphy, - "%s%s%s is/are missing for channel:%d [%d MHz].\n", - (list->channels[i].data & CHAN_HAS_CAL ? "" : + if (chan->data != CHAN_HAS_ALL) { + wiphy_err(dev->wiphy, "%s%s%s is/are missing for " + "channel:%d [%d MHz].\n", + (chan->data & CHAN_HAS_CAL ? "" : " [iqauto calibration data]"), - (list->channels[i].data & CHAN_HAS_LIMIT ? "" : + (chan->data & CHAN_HAS_LIMIT ? "" : " [output power limits]"), - (list->channels[i].data & CHAN_HAS_CURVE ? 
"" : + (chan->data & CHAN_HAS_CURVE ? "" : " [curve data]"), - list->channels[i].index, list->channels[i].freq); + chan->index, chan->freq); continue; } - tmp->channels[j].band = list->channels[i].band; - tmp->channels[j].center_freq = list->channels[i].freq; + tmp->channels[j].band = chan->band; + tmp->channels[j].center_freq = chan->freq; j++; } @@ -291,7 +317,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev) } } - /* sort the list by the channel index */ + /* sort the channel list by frequency */ sort(list->channels, list->entries, sizeof(struct p54_channel_entry), p54_compare_channels, NULL); @@ -410,33 +436,121 @@ static int p54_convert_rev1(struct ieee80211_hw *dev, static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2", "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" }; -static void p54_parse_rssical(struct ieee80211_hw *dev, void *data, int len, - u16 type) +static int p54_parse_rssical(struct ieee80211_hw *dev, + u8 *data, int len, u16 type) { struct p54_common *priv = dev->priv; - int offset = (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) ? 2 : 0; - int entry_size = sizeof(struct pda_rssi_cal_entry) + offset; - int num_entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2; - int i; + struct p54_rssi_db_entry *entry; + size_t db_len, entries; + int offset = 0, i; + + if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) { + entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2; + if (len != sizeof(struct pda_rssi_cal_entry) * entries) { + wiphy_err(dev->wiphy, "rssical size mismatch.\n"); + goto err_data; + } + } else { + /* + * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...) + * have an empty two byte header. + */ + if (*((__le16 *)&data[offset]) == cpu_to_le16(0)) + offset += 2; - if (len != (entry_size * num_entries)) { - wiphy_err(dev->wiphy, - "unknown rssi calibration data packing type:(%x) len:%d.\n", - type, len); + entries = (len - offset) / + sizeof(struct pda_rssi_cal_ext_entry); - print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, - data, len); + if ((len - offset) % sizeof(struct pda_rssi_cal_ext_entry) || + entries <= 0) { + wiphy_err(dev->wiphy, "invalid rssi database.\n"); + goto err_data; + } + } - wiphy_err(dev->wiphy, "please report this issue.\n"); - return; + db_len = sizeof(*entry) * entries; + priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL); + if (!priv->rssi_db) + return -ENOMEM; + + priv->rssi_db->offset = 0; + priv->rssi_db->entries = entries; + priv->rssi_db->entry_size = sizeof(*entry); + priv->rssi_db->len = db_len; + + entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset); + if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) { + struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset]; + + for (i = 0; i < entries; i++) { + entry[i].freq = le16_to_cpu(cal[i].freq); + entry[i].mul = (s16) le16_to_cpu(cal[i].mul); + entry[i].add = (s16) le16_to_cpu(cal[i].add); + } + } else { + struct pda_rssi_cal_entry *cal = (void *) &data[offset]; + + for (i = 0; i < entries; i++) { + u16 freq; + switch (i) { + case IEEE80211_BAND_2GHZ: + freq = 2437; + break; + case IEEE80211_BAND_5GHZ: + freq = 5240; + break; + } + + entry[i].freq = freq; + entry[i].mul = (s16) le16_to_cpu(cal[i].mul); + entry[i].add = (s16) le16_to_cpu(cal[i].add); + } } - for (i = 0; i < num_entries; i++) { - struct pda_rssi_cal_entry *cal = data + - (offset + i * entry_size); - priv->rssical_db[i].mul = (s16) le16_to_cpu(cal->mul); - priv->rssical_db[i].add = (s16) le16_to_cpu(cal->add); + /* 
sort the list by channel frequency */ + sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL); + return 0; + +err_data: + wiphy_err(dev->wiphy, + "rssi calibration data packing type:(%x) len:%d.\n", + type, len); + + print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len); + + wiphy_err(dev->wiphy, "please report this issue.\n"); + return -EINVAL; +} + +struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq) +{ + struct p54_rssi_db_entry *entry; + int i, found = -1; + + if (!priv->rssi_db) + return &p54_rssi_default; + + entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset); + for (i = 0; i < priv->rssi_db->entries; i++) { + if (!same_band(freq, entry[i].freq)) + continue; + + if (found == -1) { + found = i; + continue; + } + + /* nearest match */ + if (abs(freq - entry[i].freq) < + abs(freq - entry[found].freq)) { + found = i; + continue; + } else { + break; + } } + + return found < 0 ? &p54_rssi_default : &entry[found]; } static void p54_parse_default_country(struct ieee80211_hw *dev, @@ -627,21 +741,30 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) case PDR_RSSI_LINEAR_APPROXIMATION: case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND: case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED: - p54_parse_rssical(dev, entry->data, data_len, - le16_to_cpu(entry->code)); + err = p54_parse_rssical(dev, entry->data, data_len, + le16_to_cpu(entry->code)); + if (err) + goto err; break; - case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM: { - __le16 *src = (void *) entry->data; - s16 *dst = (void *) &priv->rssical_db; + case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: { + struct pda_custom_wrapper *pda = (void *) entry->data; + __le16 *src; + u16 *dst; int i; - if (data_len != sizeof(priv->rssical_db)) { - err = -EINVAL; - goto err; - } - for (i = 0; i < sizeof(priv->rssical_db) / - sizeof(*src); i++) + if (priv->rssi_db || data_len < sizeof(*pda)) + break; + + priv->rssi_db = p54_convert_db(pda, data_len); + if (!priv->rssi_db) + break; + + src = (void *) priv->rssi_db->data; + dst = (void *) priv->rssi_db->data; + + for (i = 0; i < priv->rssi_db->entries; i++) *(dst++) = (s16) le16_to_cpu(*(src++)); + } break; case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: { @@ -717,6 +840,8 @@ good_eeprom: SET_IEEE80211_PERM_ADDR(dev, perm_addr); } + priv->cur_rssi = &p54_rssi_default; + wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n", dev->wiphy->perm_addr, priv->version, p54_rf_chips[priv->rxhw]); @@ -727,9 +852,11 @@ err: kfree(priv->iq_autocal); kfree(priv->output_limit); kfree(priv->curve_data); + kfree(priv->rssi_db); priv->iq_autocal = NULL; priv->output_limit = NULL; priv->curve_data = NULL; + priv->rssi_db = NULL; wiphy_err(dev->wiphy, "eeprom parse failed!\n"); return err; diff --git a/drivers/net/wireless/p54/eeprom.h b/drivers/net/wireless/p54/eeprom.h index 9051aef11249..afde72b84606 100644 --- a/drivers/net/wireless/p54/eeprom.h +++ b/drivers/net/wireless/p54/eeprom.h @@ -81,6 +81,12 @@ struct pda_pa_curve_data { u8 data[0]; } __packed; +struct pda_rssi_cal_ext_entry { + __le16 freq; + __le16 mul; + __le16 add; +} __packed; + struct pda_rssi_cal_entry { __le16 mul; __le16 add; @@ -179,6 +185,7 @@ struct pda_custom_wrapper { /* used by our modificated eeprom image */ #define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM 0xDEAD +#define PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 0xCAFF #define PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM 0xBEEF #define PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM 0xB05D diff --git a/drivers/net/wireless/p54/fwio.c 
b/drivers/net/wireless/p54/fwio.c index 92b9b1f05fd5..2fab7d20ffc2 100644 --- a/drivers/net/wireless/p54/fwio.c +++ b/drivers/net/wireless/p54/fwio.c @@ -397,9 +397,9 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) union p54_scan_body_union *body; struct p54_scan_tail_rate *rate; struct pda_rssi_cal_entry *rssi; + struct p54_rssi_db_entry *rssi_data; unsigned int i; void *entry; - int band = priv->hw->conf.channel->band; __le16 freq = cpu_to_le16(priv->hw->conf.channel->center_freq); skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) + @@ -503,13 +503,14 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) } rssi = (struct pda_rssi_cal_entry *) skb_put(skb, sizeof(*rssi)); - rssi->mul = cpu_to_le16(priv->rssical_db[band].mul); - rssi->add = cpu_to_le16(priv->rssical_db[band].add); + rssi_data = p54_rssi_find(priv, le16_to_cpu(freq)); + rssi->mul = cpu_to_le16(rssi_data->mul); + rssi->add = cpu_to_le16(rssi_data->add); if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { /* Longbow frontend needs ever more */ rssi = (void *) skb_put(skb, sizeof(*rssi)); - rssi->mul = cpu_to_le16(priv->rssical_db[band].longbow_unkn); - rssi->add = cpu_to_le16(priv->rssical_db[band].longbow_unk2); + rssi->mul = cpu_to_le16(rssi_data->longbow_unkn); + rssi->add = cpu_to_le16(rssi_data->longbow_unk2); } if (priv->fw_var >= 0x509) { @@ -523,6 +524,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); p54_tx(priv, skb); + priv->cur_rssi = rssi_data; return 0; err: @@ -557,6 +559,7 @@ int p54_set_edcf(struct p54_common *priv) { struct sk_buff *skb; struct p54_edcf *edcf; + u8 rtd; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf), P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC); @@ -573,9 +576,15 @@ int p54_set_edcf(struct p54_common *priv) edcf->sifs = 0x0a; edcf->eofpad = 0x06; } + /* + * calculate the extra round trip delay according to the + * formula from 802.11-2007 17.3.8.6. 
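This p54_set_edcf() change derives an extra air-propagation allowance from the mac80211 coverage class (3 us per step, clamped to 0..31 elsewhere in the patch) and adds it to the slot time. A minimal stand-alone illustration of the arithmetic (not part of the patch):

/* Sketch only: slot-time extension per 802.11-2007 coverage class. */
#include <stdio.h>

int main(void)
{
	unsigned int coverage_class = 5;	/* example value, valid range 0..31 */
	unsigned int slottime = 9;		/* short slot time in us */
	unsigned int rtd = 3 * coverage_class;	/* extra round-trip delay in us */

	printf("slot time %u us -> %u us\n", slottime, slottime + rtd);
	return 0;
}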
+ */ + rtd = 3 * priv->coverage_class; + edcf->slottime += rtd; + edcf->round_trip_delay = cpu_to_le16(rtd); /* (see prism54/isl_oid.h for further details) */ edcf->frameburst = cpu_to_le16(0); - edcf->round_trip_delay = cpu_to_le16(0); edcf->flags = 0; memset(edcf->mapping, 0, sizeof(edcf->mapping)); memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue)); diff --git a/drivers/net/wireless/p54/lmac.h b/drivers/net/wireless/p54/lmac.h index 04b63ec80fa4..eb581abc1079 100644 --- a/drivers/net/wireless/p54/lmac.h +++ b/drivers/net/wireless/p54/lmac.h @@ -526,7 +526,7 @@ int p54_init_leds(struct p54_common *priv); void p54_unregister_leds(struct p54_common *priv); /* xmit functions */ -int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb); +void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb); int p54_tx_cancel(struct p54_common *priv, __le32 req_id); void p54_tx(struct p54_common *priv, struct sk_buff *skb); @@ -551,6 +551,7 @@ int p54_upload_key(struct p54_common *priv, u8 algo, int slot, /* eeprom */ int p54_download_eeprom(struct p54_common *priv, void *buf, u16 offset, u16 len); +struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *p, const u16 freq); /* utility */ u8 *p54_find_ie(struct sk_buff *skb, u8 ie); diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c index 622d27b6d8f2..356e6bb443a6 100644 --- a/drivers/net/wireless/p54/main.c +++ b/drivers/net/wireless/p54/main.c @@ -157,7 +157,7 @@ static int p54_beacon_update(struct p54_common *priv, * to cancel the old beacon template by hand, instead the firmware * will release the previous one through the feedback mechanism. */ - WARN_ON(p54_tx_80211(priv->hw, beacon)); + p54_tx_80211(priv->hw, beacon); priv->tsf_high32 = 0; priv->tsf_low32 = 0; @@ -524,6 +524,59 @@ static int p54_get_survey(struct ieee80211_hw *dev, int idx, return 0; } +static unsigned int p54_flush_count(struct p54_common *priv) +{ + unsigned int total = 0, i; + + BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats)); + + /* + * Because the firmware has the sole control over any frames + * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they + * don't really count as pending or active. + */ + for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++) + total += priv->tx_stats[i].len; + return total; +} + +static void p54_flush(struct ieee80211_hw *dev, bool drop) +{ + struct p54_common *priv = dev->priv; + unsigned int total, i; + + /* + * Currently, it wouldn't really matter if we wait for one second + * or 15 minutes. But once someone gets around and completes the + * TODOs [ancel stuck frames / reset device] in p54_work, it will + * suddenly make sense to wait that long. + */ + i = P54_STATISTICS_UPDATE * 2 / 20; + + /* + * In this case no locking is required because as we speak the + * queues have already been stopped and no new frames can sneak + * up from behind. 
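The new p54_flush() in this patch sums the frames still owned by the driver's queues and polls until they drain or a deadline expires. A rough user-space model of such a bounded wait (illustrative values, not part of the patch):

/* Sketch only: poll pending-frame counters until empty or timed out. */
#include <stdio.h>
#include <unistd.h>

#define NUM_QUEUES 8

static unsigned int queue_len[NUM_QUEUES];

static unsigned int pending_frames(void)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < NUM_QUEUES; i++)
		total += queue_len[i];
	return total;
}

int main(void)
{
	int tries = 50;			/* roughly one second at 20 ms per poll */
	unsigned int total;

	queue_len[2] = 3;		/* pretend a few frames are in flight */
	while ((total = pending_frames()) && tries--) {
		usleep(20 * 1000);
		queue_len[2] = 0;	/* in a driver, tx completion drains this */
	}
	if (total)
		fprintf(stderr, "flush timeout, frames still pending\n");
	return 0;
}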
+ */ + while ((total = p54_flush_count(priv) && i--)) { + /* waste time */ + msleep(20); + } + + WARN(total, "tx flush timeout, unresponsive firmware"); +} + +static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class) +{ + struct p54_common *priv = dev->priv; + + mutex_lock(&priv->conf_mutex); + /* support all coverage class values as in 802.11-2007 Table 7-27 */ + priv->coverage_class = clamp_t(u8, coverage_class, 0, 31); + p54_set_edcf(priv); + mutex_unlock(&priv->conf_mutex); +} + static const struct ieee80211_ops p54_ops = { .tx = p54_tx_80211, .start = p54_start, @@ -536,11 +589,13 @@ static const struct ieee80211_ops p54_ops = { .sta_remove = p54_sta_add_remove, .set_key = p54_set_key, .config = p54_config, + .flush = p54_flush, .bss_info_changed = p54_bss_info_changed, .configure_filter = p54_configure_filter, .conf_tx = p54_conf_tx, .get_stats = p54_get_stats, .get_survey = p54_get_survey, + .set_coverage_class = p54_set_coverage_class, }; struct ieee80211_hw *p54_init_common(size_t priv_data_len) @@ -611,7 +666,7 @@ EXPORT_SYMBOL_GPL(p54_init_common); int p54_register_common(struct ieee80211_hw *dev, struct device *pdev) { - struct p54_common *priv = dev->priv; + struct p54_common __maybe_unused *priv = dev->priv; int err; err = ieee80211_register_hw(dev); @@ -642,10 +697,12 @@ void p54_free_common(struct ieee80211_hw *dev) kfree(priv->iq_autocal); kfree(priv->output_limit); kfree(priv->curve_data); + kfree(priv->rssi_db); kfree(priv->used_rxkeys); priv->iq_autocal = NULL; priv->output_limit = NULL; priv->curve_data = NULL; + priv->rssi_db = NULL; priv->used_rxkeys = NULL; ieee80211_free_hw(dev); } diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h index 43a3b2ead81a..50730fc23fe5 100644 --- a/drivers/net/wireless/p54/p54.h +++ b/drivers/net/wireless/p54/p54.h @@ -116,7 +116,8 @@ struct p54_edcf_queue_param { __le16 txop; } __packed; -struct p54_rssi_linear_approximation { +struct p54_rssi_db_entry { + u16 freq; s16 mul; s16 add; s16 longbow_unkn; @@ -197,13 +198,14 @@ struct p54_common { u8 rx_diversity_mask; u8 tx_diversity_mask; unsigned int output_power; + struct p54_rssi_db_entry *cur_rssi; int noise; /* calibration, output power limit and rssi<->dBm conversation data */ struct pda_iq_autocal_entry *iq_autocal; unsigned int iq_autocal_len; struct p54_cal_database *curve_data; struct p54_cal_database *output_limit; - struct p54_rssi_linear_approximation rssical_db[IEEE80211_NUM_BANDS]; + struct p54_cal_database *rssi_db; struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS]; /* BBP/MAC state */ @@ -215,6 +217,7 @@ struct p54_common { u32 tsf_low32, tsf_high32; u32 basic_rate_mask; u16 aid; + u8 coverage_class; bool powersave_override; __le32 beacon_req_id; struct completion beacon_comp; diff --git a/drivers/net/wireless/p54/p54spi_eeprom.h b/drivers/net/wireless/p54/p54spi_eeprom.h index d592cbd34d78..0b7bfb0adcf2 100644 --- a/drivers/net/wireless/p54/p54spi_eeprom.h +++ b/drivers/net/wireless/p54/p54spi_eeprom.h @@ -65,9 +65,10 @@ static unsigned char p54spi_eeprom[] = { 0x03, 0x00, 0x00, 0x11, /* PDR_ANTENNA_GAIN */ 0x08, 0x08, 0x08, 0x08, -0x09, 0x00, 0xad, 0xde, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOM */ - 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +0x0a, 0x00, 0xff, 0xca, /* PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2 */ + 0x01, 0x00, 0x0a, 0x00, + 0x00, 0x00, 0x0a, 0x00, + 0x85, 0x09, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, /* struct pda_custom_wrapper */ 
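/*
 * Editor's note (illustrative sketch): the PDR_RSSI_LINEAR_APPROXIMATION
 * CUSTOMV2 record above replaces the old per-band calibration with per
 * frequency entries; struct p54_rssi_db_entry now carries the frequency
 * it applies to, and p54_scan()/p54_rssi_to_dbm() select an entry via
 * p54_rssi_find() (declared in lmac.h, defined outside these hunks).
 * The conversion itself remains the linear approximation used in
 * txrx.c:
 *
 *	dBm = ((rssi * mul) / 64 + add) / 4
 *
 * A small sketch with made-up calibration values:
 */
static int p54_rssi_to_dbm_sketch(int rssi, int mul, int add)
{
	/* same linear approximation as p54_rssi_to_dbm() */
	return ((rssi * mul) / 64 + add) / 4;
}
/* e.g. rssi = 100, mul = 64, add = -380 (hypothetical) -> -70 dBm */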
0x10, 0x06, 0x5d, 0xb0, /* PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM */ @@ -671,7 +672,7 @@ static unsigned char p54spi_eeprom[] = { 0xa8, 0x09, 0x25, 0x00, 0xf5, 0xff, 0xf9, 0xff, 0x00, 0x01, 0x02, 0x00, 0x00, 0x00, /* PDR_END */ - 0x67, 0x99, + 0xb6, 0x04, }; #endif /* P54SPI_EEPROM_H */ diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index f618b9623e5a..7834c26c2954 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -273,11 +273,9 @@ void p54_tx(struct p54_common *priv, struct sk_buff *skb) static int p54_rssi_to_dbm(struct p54_common *priv, int rssi) { - int band = priv->hw->conf.channel->band; - if (priv->rxhw != 5) { - return ((rssi * priv->rssical_db[band].mul) / 64 + - priv->rssical_db[band].add) / 4; + return ((rssi * priv->cur_rssi->mul) / 64 + + priv->cur_rssi->add) / 4; } else { /* * TODO: find the correct formula @@ -369,7 +367,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb) rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32; priv->tsf_low32 = tsf32; - rx_status->flag |= RX_FLAG_TSFT; + rx_status->flag |= RX_FLAG_MACTIME_MPDU; if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) header_len += hdr->align[0]; @@ -698,7 +696,7 @@ static u8 p54_convert_algo(u32 cipher) } } -int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) +void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -719,12 +717,8 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) &hdr_flags, &aid, &burst_allowed); if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { - if (!IS_QOS_QUEUE(queue)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } else { - return NETDEV_TX_BUSY; - } + dev_kfree_skb_any(skb); + return; } padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; @@ -867,5 +861,4 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) p54info->extra_len = extra_len; p54_tx(priv, skb); - return NETDEV_TX_OK; } diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 6f383cd684b0..f630552427b7 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -97,6 +97,18 @@ config RT2800PCI_RT35XX Support for these devices is non-functional at the moment and is intended for testers and developers. +config RT2800PCI_RT53XX + bool "rt2800-pci - Include support for rt53xx devices (EXPERIMENTAL)" + depends on EXPERIMENTAL + default n + ---help--- + This adds support for rt53xx wireless chipset family to the + rt2800pci driver. + Supported chips: RT5390 + + Support for these devices is non-functional at the moment and is + intended for testers and developers. + endif config RT2500USB diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 54ca49ad3472..329f3283697b 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -46,7 +46,7 @@ * These indirect registers work with busy bits, * and we will try maximal REGISTER_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay - * between each attampt. When the busy bit is still set at that time, + * between each attempt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. 
*/ @@ -305,9 +305,7 @@ static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev, * Enable synchronisation. */ rt2x00pci_register_read(rt2x00dev, CSR14, ®); - rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); rt2x00_set_field32(®, CSR14_TSF_SYNC, conf->sync); - rt2x00_set_field32(®, CSR14_TBCN, 1); rt2x00pci_register_write(rt2x00dev, CSR14, reg); } @@ -647,6 +645,11 @@ static void rt2400pci_start_queue(struct data_queue *queue) rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); break; case QID_BEACON: + /* + * Allow the tbtt tasklet to be scheduled. + */ + tasklet_enable(&rt2x00dev->tbtt_tasklet); + rt2x00pci_register_read(rt2x00dev, CSR14, ®); rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); rt2x00_set_field32(®, CSR14_TBCN, 1); @@ -708,6 +711,11 @@ static void rt2400pci_stop_queue(struct data_queue *queue) rt2x00_set_field32(®, CSR14_TBCN, 0); rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); rt2x00pci_register_write(rt2x00dev, CSR14, reg); + + /* + * Wait for possibly running tbtt tasklets. + */ + tasklet_disable(&rt2x00dev->tbtt_tasklet); break; default: break; @@ -771,7 +779,7 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev) rt2x00pci_register_read(rt2x00dev, TXCSR2, ®); rt2x00_set_field32(®, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); rt2x00_set_field32(®, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); - rt2x00_set_field32(®, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit); + rt2x00_set_field32(®, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit); rt2x00_set_field32(®, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); @@ -787,13 +795,13 @@ static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev) entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); - entry_priv = rt2x00dev->bcn[1].entries[0].priv_data; + entry_priv = rt2x00dev->atim->entries[0].priv_data; rt2x00pci_register_read(rt2x00dev, TXCSR4, ®); rt2x00_set_field32(®, TXCSR4_ATIM_RING_REGISTER, entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); - entry_priv = rt2x00dev->bcn[0].entries[0].priv_data; + entry_priv = rt2x00dev->bcn->entries[0].priv_data; rt2x00pci_register_read(rt2x00dev, TXCSR6, ®); rt2x00_set_field32(®, TXCSR6_BEACON_RING_REGISTER, entry_priv->desc_dma); @@ -963,9 +971,9 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev) static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_OFF) || - (state == STATE_RADIO_IRQ_OFF_ISR); + int mask = (state == STATE_RADIO_IRQ_OFF); u32 reg; + unsigned long flags; /* * When interrupts are being enabled, the interrupt registers @@ -974,12 +982,20 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, if (state == STATE_RADIO_IRQ_ON) { rt2x00pci_register_read(rt2x00dev, CSR7, ®); rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + /* + * Enable tasklets. + */ + tasklet_enable(&rt2x00dev->txstatus_tasklet); + tasklet_enable(&rt2x00dev->rxdone_tasklet); } /* * Only toggle the interrupts bits we are going to use. * Non-checked interrupt bits are disabled by default. 
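/*
 * Editor's note (illustrative sketch): CSR8 now acts as a shared
 * interrupt mask that is rewritten from several contexts - this setup
 * path, the hard interrupt handler (which masks the sources it hands
 * off to tasklets) and the tasklets themselves (which unmask them
 * again) - so every read-modify-write of it is serialized with the new
 * irqmask_lock.  A minimal sketch of that pattern, using CSR8_RXDONE
 * as the example bit (0 = interrupt enabled, 1 = masked, as in the
 * hunks below):
 */
static void rt2400pci_unmask_rxdone_sketch(struct rt2x00_dev *rt2x00dev)
{
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
	rt2x00pci_register_read(rt2x00dev, CSR8, &reg);
	rt2x00_set_field32(&reg, CSR8_RXDONE, 0);
	rt2x00pci_register_write(rt2x00dev, CSR8, reg);
	spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}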
*/ + spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); + rt2x00pci_register_read(rt2x00dev, CSR8, ®); rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, mask); rt2x00_set_field32(®, CSR8_TXDONE_TXRING, mask); @@ -987,6 +1003,17 @@ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, mask); rt2x00_set_field32(®, CSR8_RXDONE, mask); rt2x00pci_register_write(rt2x00dev, CSR8, reg); + + spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); + + if (state == STATE_RADIO_IRQ_OFF) { + /* + * Ensure that all tasklets are finished before + * disabling the interrupts. + */ + tasklet_disable(&rt2x00dev->txstatus_tasklet); + tasklet_disable(&rt2x00dev->rxdone_tasklet); + } } static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev) @@ -1059,9 +1086,7 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev, rt2400pci_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: - case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: - case STATE_RADIO_IRQ_OFF_ISR: rt2400pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: @@ -1106,19 +1131,21 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry, rt2x00_desc_write(txd, 2, word); rt2x00_desc_read(txd, 3, &word); - rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal); rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5); rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1); - rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service); rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6); rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1); rt2x00_desc_write(txd, 3, word); rt2x00_desc_read(txd, 4, &word); - rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low); + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, + txdesc->u.plcp.length_low); rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8); rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1); - rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, + txdesc->u.plcp.length_high); rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7); rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1); rt2x00_desc_write(txd, 4, word); @@ -1139,7 +1166,7 @@ static void rt2400pci_write_tx_desc(struct queue_entry *entry, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_RTS, test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); rt2x00_desc_write(txd, 0, word); @@ -1183,8 +1210,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry, /* * Enable beaconing again. 
*/ - rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); - rt2x00_set_field32(®, CSR14_TBCN, 1); rt2x00_set_field32(®, CSR14_BEACON_GEN, 1); rt2x00pci_register_write(rt2x00dev, CSR14, reg); } @@ -1253,7 +1278,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry, static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, const enum data_queue_qid queue_idx) { - struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); struct queue_entry_priv_pci *entry_priv; struct queue_entry *entry; struct txdone_entry_desc txdesc; @@ -1289,57 +1314,68 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, } } -static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance) +static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, + struct rt2x00_field32 irq_field) { - struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg = rt2x00dev->irqvalue[0]; + u32 reg; /* - * Handle interrupts, walk through all bits - * and run the tasks, the bits are checked in order of - * priority. + * Enable a single interrupt. The interrupt mask register + * access needs locking. */ + spin_lock_irq(&rt2x00dev->irqmask_lock); - /* - * 1 - Beacon timer expired interrupt. - */ - if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) - rt2x00lib_beacondone(rt2x00dev); + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, irq_field, 0); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); - /* - * 2 - Rx ring done interrupt. - */ - if (rt2x00_get_field32(reg, CSR7_RXDONE)) - rt2x00pci_rxdone(rt2x00dev); + spin_unlock_irq(&rt2x00dev->irqmask_lock); +} - /* - * 3 - Atim ring transmit done interrupt. - */ - if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) - rt2400pci_txdone(rt2x00dev, QID_ATIM); +static void rt2400pci_txstatus_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + u32 reg; /* - * 4 - Priority ring transmit done interrupt. + * Handle all tx queues. */ - if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) - rt2400pci_txdone(rt2x00dev, QID_AC_VO); + rt2400pci_txdone(rt2x00dev, QID_ATIM); + rt2400pci_txdone(rt2x00dev, QID_AC_VO); + rt2400pci_txdone(rt2x00dev, QID_AC_VI); /* - * 5 - Tx ring transmit done interrupt. + * Enable all TXDONE interrupts again. */ - if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) - rt2400pci_txdone(rt2x00dev, QID_AC_VI); + spin_lock_irq(&rt2x00dev->irqmask_lock); - /* Enable interrupts again. */ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, - STATE_RADIO_IRQ_ON_ISR); - return IRQ_HANDLED; + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, 0); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, 0); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, 0); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); + + spin_unlock_irq(&rt2x00dev->irqmask_lock); +} + +static void rt2400pci_tbtt_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00lib_beacondone(rt2x00dev); + rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE); +} + +static void rt2400pci_rxdone_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00pci_rxdone(rt2x00dev); + rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE); } static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg; + u32 reg, mask; /* * Get the interrupt sources & saved to local variable. 
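/*
 * Editor's note (schematic only, not compilable on its own): the
 * threaded interrupt handler and the STATE_RADIO_IRQ_*_ISR states are
 * replaced by tasklets.  The hard IRQ handler below now just
 * acknowledges CSR7, schedules a tasklet per reported source and masks
 * those sources in CSR8; each tasklet does the real work and unmasks
 * its source afterwards.  Roughly:
 *
 *	top half:
 *		mask = reg;                     (every reported source)
 *		if (rt2x00_get_field32(reg, CSR7_RXDONE))
 *			tasklet_schedule(&rt2x00dev->rxdone_tasklet);
 *		...additionally mask the three TXDONE bits...
 *		CSR8 |= mask;                   (under irqmask_lock)
 *
 *	bottom half (rt2400pci_rxdone_tasklet):
 *		rt2x00pci_rxdone(rt2x00dev);
 *		rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE);
 */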
@@ -1354,14 +1390,44 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; - /* Store irqvalues for use in the interrupt thread. */ - rt2x00dev->irqvalue[0] = reg; + mask = reg; - /* Disable interrupts, will be enabled again in the interrupt thread. */ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, - STATE_RADIO_IRQ_OFF_ISR); + /* + * Schedule tasklets for interrupt handling. + */ + if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) + tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); - return IRQ_WAKE_THREAD; + if (rt2x00_get_field32(reg, CSR7_RXDONE)) + tasklet_schedule(&rt2x00dev->rxdone_tasklet); + + if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) || + rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) || + rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) { + tasklet_schedule(&rt2x00dev->txstatus_tasklet); + /* + * Mask out all txdone interrupts. + */ + rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1); + rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1); + rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1); + } + + /* + * Disable all interrupts for which a tasklet was scheduled right now, + * the tasklet will reenable the appropriate interrupts. + */ + spin_lock(&rt2x00dev->irqmask_lock); + + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + reg |= mask; + rt2x00pci_register_write(rt2x00dev, CSR8, reg); + + spin_unlock(&rt2x00dev->irqmask_lock); + + + + return IRQ_HANDLED; } /* @@ -1574,6 +1640,7 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) */ __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); /* * Set the rssi offset. @@ -1655,7 +1722,9 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = { static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { .irq_handler = rt2400pci_interrupt, - .irq_handler_thread = rt2400pci_interrupt_thread, + .txstatus_tasklet = rt2400pci_txstatus_tasklet, + .tbtt_tasklet = rt2400pci_tbtt_tasklet, + .rxdone_tasklet = rt2400pci_rxdone_tasklet, .probe_hw = rt2400pci_probe_hw, .initialize = rt2x00pci_initialize, .uninitialize = rt2x00pci_uninitialize, diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index a9ff26a27724..58277878889e 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -293,7 +293,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00intf_conf *conf, const unsigned int flags) { - struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); + struct data_queue *queue = rt2x00dev->bcn; unsigned int bcn_preload; u32 reg; @@ -311,9 +311,7 @@ static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev, * Enable synchronisation. */ rt2x00pci_register_read(rt2x00dev, CSR14, ®); - rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); rt2x00_set_field32(®, CSR14_TSF_SYNC, conf->sync); - rt2x00_set_field32(®, CSR14_TBCN, 1); rt2x00pci_register_write(rt2x00dev, CSR14, reg); } @@ -737,6 +735,11 @@ static void rt2500pci_start_queue(struct data_queue *queue) rt2x00pci_register_write(rt2x00dev, RXCSR0, reg); break; case QID_BEACON: + /* + * Allow the tbtt tasklet to be scheduled. 
+ */ + tasklet_enable(&rt2x00dev->tbtt_tasklet); + rt2x00pci_register_read(rt2x00dev, CSR14, ®); rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); rt2x00_set_field32(®, CSR14_TBCN, 1); @@ -798,6 +801,11 @@ static void rt2500pci_stop_queue(struct data_queue *queue) rt2x00_set_field32(®, CSR14_TBCN, 0); rt2x00_set_field32(®, CSR14_BEACON_GEN, 0); rt2x00pci_register_write(rt2x00dev, CSR14, reg); + + /* + * Wait for possibly running tbtt tasklets. + */ + tasklet_disable(&rt2x00dev->tbtt_tasklet); break; default: break; @@ -857,7 +865,7 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev) rt2x00pci_register_read(rt2x00dev, TXCSR2, ®); rt2x00_set_field32(®, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); rt2x00_set_field32(®, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); - rt2x00_set_field32(®, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit); + rt2x00_set_field32(®, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit); rt2x00_set_field32(®, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); @@ -873,13 +881,13 @@ static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev) entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); - entry_priv = rt2x00dev->bcn[1].entries[0].priv_data; + entry_priv = rt2x00dev->atim->entries[0].priv_data; rt2x00pci_register_read(rt2x00dev, TXCSR4, ®); rt2x00_set_field32(®, TXCSR4_ATIM_RING_REGISTER, entry_priv->desc_dma); rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); - entry_priv = rt2x00dev->bcn[0].entries[0].priv_data; + entry_priv = rt2x00dev->bcn->entries[0].priv_data; rt2x00pci_register_read(rt2x00dev, TXCSR6, ®); rt2x00_set_field32(®, TXCSR6_BEACON_RING_REGISTER, entry_priv->desc_dma); @@ -1118,9 +1126,9 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev) static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_OFF) || - (state == STATE_RADIO_IRQ_OFF_ISR); + int mask = (state == STATE_RADIO_IRQ_OFF); u32 reg; + unsigned long flags; /* * When interrupts are being enabled, the interrupt registers @@ -1129,12 +1137,20 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, if (state == STATE_RADIO_IRQ_ON) { rt2x00pci_register_read(rt2x00dev, CSR7, ®); rt2x00pci_register_write(rt2x00dev, CSR7, reg); + + /* + * Enable tasklets. + */ + tasklet_enable(&rt2x00dev->txstatus_tasklet); + tasklet_enable(&rt2x00dev->rxdone_tasklet); } /* * Only toggle the interrupts bits we are going to use. * Non-checked interrupt bits are disabled by default. */ + spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); + rt2x00pci_register_read(rt2x00dev, CSR8, ®); rt2x00_set_field32(®, CSR8_TBCN_EXPIRE, mask); rt2x00_set_field32(®, CSR8_TXDONE_TXRING, mask); @@ -1142,6 +1158,16 @@ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, mask); rt2x00_set_field32(®, CSR8_RXDONE, mask); rt2x00pci_register_write(rt2x00dev, CSR8, reg); + + spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); + + if (state == STATE_RADIO_IRQ_OFF) { + /* + * Ensure that all tasklets are finished. 
+ */ + tasklet_disable(&rt2x00dev->txstatus_tasklet); + tasklet_disable(&rt2x00dev->rxdone_tasklet); + } } static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev) @@ -1214,9 +1240,7 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev, rt2500pci_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: - case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: - case STATE_RADIO_IRQ_OFF_ISR: rt2500pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: @@ -1263,10 +1287,12 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry, rt2x00_desc_write(txd, 2, word); rt2x00_desc_read(txd, 3, &word); - rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal); - rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service); - rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low); - rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal); + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, + txdesc->u.plcp.length_low); + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, + txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 3, word); rt2x00_desc_read(txd, 10, &word); @@ -1291,7 +1317,7 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry, rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); @@ -1337,8 +1363,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry, /* * Enable beaconing again. */ - rt2x00_set_field32(®, CSR14_TSF_COUNT, 1); - rt2x00_set_field32(®, CSR14_TBCN, 1); rt2x00_set_field32(®, CSR14_BEACON_GEN, 1); rt2x00pci_register_write(rt2x00dev, CSR14, reg); } @@ -1386,7 +1410,7 @@ static void rt2500pci_fill_rxdone(struct queue_entry *entry, static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, const enum data_queue_qid queue_idx) { - struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); struct queue_entry_priv_pci *entry_priv; struct queue_entry *entry; struct txdone_entry_desc txdesc; @@ -1422,58 +1446,68 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, } } -static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance) +static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, + struct rt2x00_field32 irq_field) { - struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg = rt2x00dev->irqvalue[0]; + u32 reg; /* - * Handle interrupts, walk through all bits - * and run the tasks, the bits are checked in order of - * priority. + * Enable a single interrupt. The interrupt mask register + * access needs locking. */ + spin_lock_irq(&rt2x00dev->irqmask_lock); - /* - * 1 - Beacon timer expired interrupt. - */ - if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) - rt2x00lib_beacondone(rt2x00dev); + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, irq_field, 0); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); - /* - * 2 - Rx ring done interrupt. 
- */ - if (rt2x00_get_field32(reg, CSR7_RXDONE)) - rt2x00pci_rxdone(rt2x00dev); + spin_unlock_irq(&rt2x00dev->irqmask_lock); +} - /* - * 3 - Atim ring transmit done interrupt. - */ - if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) - rt2500pci_txdone(rt2x00dev, QID_ATIM); +static void rt2500pci_txstatus_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + u32 reg; /* - * 4 - Priority ring transmit done interrupt. + * Handle all tx queues. */ - if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) - rt2500pci_txdone(rt2x00dev, QID_AC_VO); + rt2500pci_txdone(rt2x00dev, QID_ATIM); + rt2500pci_txdone(rt2x00dev, QID_AC_VO); + rt2500pci_txdone(rt2x00dev, QID_AC_VI); /* - * 5 - Tx ring transmit done interrupt. + * Enable all TXDONE interrupts again. */ - if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) - rt2500pci_txdone(rt2x00dev, QID_AC_VI); + spin_lock_irq(&rt2x00dev->irqmask_lock); + + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + rt2x00_set_field32(®, CSR8_TXDONE_TXRING, 0); + rt2x00_set_field32(®, CSR8_TXDONE_ATIMRING, 0); + rt2x00_set_field32(®, CSR8_TXDONE_PRIORING, 0); + rt2x00pci_register_write(rt2x00dev, CSR8, reg); - /* Enable interrupts again. */ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, - STATE_RADIO_IRQ_ON_ISR); + spin_unlock_irq(&rt2x00dev->irqmask_lock); +} - return IRQ_HANDLED; +static void rt2500pci_tbtt_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00lib_beacondone(rt2x00dev); + rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE); +} + +static void rt2500pci_rxdone_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00pci_rxdone(rt2x00dev); + rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE); } static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg; + u32 reg, mask; /* * Get the interrupt sources & saved to local variable. @@ -1488,14 +1522,42 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; - /* Store irqvalues for use in the interrupt thread. */ - rt2x00dev->irqvalue[0] = reg; + mask = reg; - /* Disable interrupts, will be enabled again in the interrupt thread. */ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, - STATE_RADIO_IRQ_OFF_ISR); + /* + * Schedule tasklets for interrupt handling. + */ + if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) + tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); - return IRQ_WAKE_THREAD; + if (rt2x00_get_field32(reg, CSR7_RXDONE)) + tasklet_schedule(&rt2x00dev->rxdone_tasklet); + + if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) || + rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) || + rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) { + tasklet_schedule(&rt2x00dev->txstatus_tasklet); + /* + * Mask out all txdone interrupts. + */ + rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1); + rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1); + rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1); + } + + /* + * Disable all interrupts for which a tasklet was scheduled right now, + * the tasklet will reenable the appropriate interrupts. 
+ */ + spin_lock(&rt2x00dev->irqmask_lock); + + rt2x00pci_register_read(rt2x00dev, CSR8, ®); + reg |= mask; + rt2x00pci_register_write(rt2x00dev, CSR8, reg); + + spin_unlock(&rt2x00dev->irqmask_lock); + + return IRQ_HANDLED; } /* @@ -1896,6 +1958,7 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) */ __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); /* * Set the rssi offset. @@ -1952,7 +2015,9 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = { static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { .irq_handler = rt2500pci_interrupt, - .irq_handler_thread = rt2500pci_interrupt_thread, + .txstatus_tasklet = rt2500pci_txstatus_tasklet, + .tbtt_tasklet = rt2500pci_tbtt_tasklet, + .rxdone_tasklet = rt2500pci_rxdone_tasklet, .probe_hw = rt2500pci_probe_hw, .initialize = rt2x00pci_initialize, .uninitialize = rt2x00pci_uninitialize, diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 6b3b1de46792..979fe6596a2d 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -478,9 +478,7 @@ static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev, rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); rt2500usb_register_read(rt2x00dev, TXRX_CSR19, ®); - rt2x00_set_field16(®, TXRX_CSR19_TSF_COUNT, 1); rt2x00_set_field16(®, TXRX_CSR19_TSF_SYNC, conf->sync); - rt2x00_set_field16(®, TXRX_CSR19_TBCN, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); } @@ -1056,9 +1054,7 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, rt2500usb_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: - case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: - case STATE_RADIO_IRQ_OFF_ISR: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: @@ -1104,7 +1100,7 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher); rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); @@ -1118,10 +1114,12 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry, rt2x00_desc_write(txd, 1, word); rt2x00_desc_read(txd, 2, &word); - rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); - rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, + txdesc->u.plcp.length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, + txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { @@ -1799,6 +1797,7 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags); } __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); /* * Set the rssi offset. 
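/*
 * Editor's note (hedged, illustrative): DRIVER_REQUIRE_SW_SEQNO, set
 * for the rt2400pci/rt2500pci/rt2500usb chips in the hunks above,
 * signals that the hardware does not assign 802.11 sequence numbers,
 * so rt2x00lib has to fill in the sequence control field in software
 * before a frame is handed to the device.  The library side is not
 * part of these hunks; a generic sketch of such software assignment
 * (the counter and helper name are made up):
 */
static void sw_assign_seqno_sketch(struct ieee80211_hdr *hdr, u16 *seqno)
{
	/* keep the fragment number, replace bits 4..15 with our counter */
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16((*seqno << 4) & IEEE80211_SCTL_SEQ);
	*seqno = (*seqno + 1) & 0x0fff;
}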
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 4c55e8525cad..70b9abbdeb9e 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -51,6 +51,7 @@ * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) + * RF5390 2.4G 1T1R */ #define RF2820 0x0001 #define RF2850 0x0002 @@ -65,6 +66,7 @@ #define RF3320 0x000b #define RF3322 0x000c #define RF3853 0x000d +#define RF5390 0x5390 /* * Chipset revisions. @@ -77,6 +79,7 @@ #define REV_RT3071E 0x0211 #define REV_RT3090E 0x0211 #define REV_RT3390E 0x0211 +#define REV_RT5390F 0x0502 /* * Signal information. @@ -121,6 +124,13 @@ #define E2PROM_CSR_RELOAD FIELD32(0x00000080) /* + * AUX_CTRL: Aux/PCI-E related configuration + */ +#define AUX_CTRL 0x10c +#define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002) +#define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400) + +/* * OPT_14: Unknown register used by rt3xxx devices. */ #define OPT_14_CSR 0x0114 @@ -270,6 +280,7 @@ /* * GPIO_CTRL_CFG: + * GPIOD: GPIO direction, 0: Output, 1: Input */ #define GPIO_CTRL_CFG 0x0228 #define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001) @@ -280,7 +291,14 @@ #define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020) #define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040) #define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080) -#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100) +#define GPIO_CTRL_CFG_GPIOD_BIT0 FIELD32(0x00000100) +#define GPIO_CTRL_CFG_GPIOD_BIT1 FIELD32(0x00000200) +#define GPIO_CTRL_CFG_GPIOD_BIT2 FIELD32(0x00000400) +#define GPIO_CTRL_CFG_GPIOD_BIT3 FIELD32(0x00000800) +#define GPIO_CTRL_CFG_GPIOD_BIT4 FIELD32(0x00001000) +#define GPIO_CTRL_CFG_GPIOD_BIT5 FIELD32(0x00002000) +#define GPIO_CTRL_CFG_GPIOD_BIT6 FIELD32(0x00004000) +#define GPIO_CTRL_CFG_GPIOD_BIT7 FIELD32(0x00008000) /* * MCU_CMD_CFG @@ -372,8 +390,12 @@ /* * US_CYC_CNT + * BT_MODE_EN: Bluetooth mode enable + * CLOCK CYCLE: Clock cycle count in 1us. 
+ * PCI:0x21, PCIE:0x7d, USB:0x1e */ #define US_CYC_CNT 0x02a4 +#define US_CYC_CNT_BT_MODE_EN FIELD32(0x00000100) #define US_CYC_CNT_CLOCK_CYCLE FIELD32(0x000000ff) /* @@ -442,7 +464,7 @@ */ #define RF_CSR_CFG 0x0500 #define RF_CSR_CFG_DATA FIELD32(0x000000ff) -#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00) +#define RF_CSR_CFG_REGNUM FIELD32(0x00003f00) #define RF_CSR_CFG_WRITE FIELD32(0x00010000) #define RF_CSR_CFG_BUSY FIELD32(0x00020000) @@ -1132,8 +1154,8 @@ * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd) * PROTECT_CTRL: Protection control frame type for CCK TX * 0:none, 1:RTS/CTS, 2:CTS-to-self - * PROTECT_NAV: TXOP protection type for CCK TX - * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect + * PROTECT_NAV_SHORT: TXOP protection type for CCK TX with short NAV + * PROTECT_NAV_LONG: TXOP protection type for CCK TX with long NAV * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow @@ -1145,7 +1167,8 @@ #define CCK_PROT_CFG 0x1364 #define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) #define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) -#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define CCK_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000) +#define CCK_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000) #define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) #define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) #define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) @@ -1160,7 +1183,8 @@ #define OFDM_PROT_CFG 0x1368 #define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) #define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) -#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define OFDM_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000) +#define OFDM_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000) #define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) #define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) #define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) @@ -1175,7 +1199,8 @@ #define MM20_PROT_CFG 0x136c #define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) #define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) -#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define MM20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000) +#define MM20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000) #define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) #define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) #define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) @@ -1190,7 +1215,8 @@ #define MM40_PROT_CFG 0x1370 #define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) #define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) -#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define MM40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000) +#define MM40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000) #define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) #define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) #define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) @@ -1205,7 +1231,8 @@ #define GF20_PROT_CFG 0x1374 #define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) #define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) -#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define GF20_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000) +#define GF20_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000) #define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) #define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) #define GF20_PROT_CFG_TX_OP_ALLOW_MM20 
FIELD32(0x00400000) @@ -1220,7 +1247,8 @@ #define GF40_PROT_CFG 0x1378 #define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff) #define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000) -#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000) +#define GF40_PROT_CFG_PROTECT_NAV_SHORT FIELD32(0x00040000) +#define GF40_PROT_CFG_PROTECT_NAV_LONG FIELD32(0x00080000) #define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000) #define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000) #define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000) @@ -1697,11 +1725,14 @@ struct mac_iveiv_entry { */ /* - * BBP 1: TX Antenna & Power - * POWER: 0 - normal, 1 - drop tx power by 6dBm, 2 - drop tx power by 12dBm, - * 3 - increase tx power by 6dBm + * BBP 1: TX Antenna & Power Control + * POWER_CTRL: + * 0 - normal, + * 1 - drop tx power by 6dBm, + * 2 - drop tx power by 12dBm, + * 3 - increase tx power by 6dBm */ -#define BBP1_TX_POWER FIELD8(0x07) +#define BBP1_TX_POWER_CTRL FIELD8(0x07) #define BBP1_TX_ANTENNA FIELD8(0x18) /* @@ -1715,6 +1746,13 @@ struct mac_iveiv_entry { */ #define BBP4_TX_BF FIELD8(0x01) #define BBP4_BANDWIDTH FIELD8(0x18) +#define BBP4_MAC_IF_CTRL FIELD8(0x40) + +/* + * BBP 109 + */ +#define BBP109_TX0_POWER FIELD8(0x0f) +#define BBP109_TX1_POWER FIELD8(0xf0) /* * BBP 138: Unknown @@ -1725,6 +1763,11 @@ struct mac_iveiv_entry { #define BBP138_TX_DAC2 FIELD8(0x40) /* + * BBP 152: Rx Ant + */ +#define BBP152_RX_DEFAULT_ANT FIELD8(0x80) + +/* * RFCSR registers * The wordsize of the RFCSR is 8 bits. */ @@ -1733,12 +1776,18 @@ struct mac_iveiv_entry { * RFCSR 1: */ #define RFCSR1_RF_BLOCK_EN FIELD8(0x01) +#define RFCSR1_PLL_PD FIELD8(0x02) #define RFCSR1_RX0_PD FIELD8(0x04) #define RFCSR1_TX0_PD FIELD8(0x08) #define RFCSR1_RX1_PD FIELD8(0x10) #define RFCSR1_TX1_PD FIELD8(0x20) /* + * RFCSR 2: + */ +#define RFCSR2_RESCAL_EN FIELD8(0x80) + +/* * RFCSR 6: */ #define RFCSR6_R1 FIELD8(0x03) @@ -1750,6 +1799,11 @@ struct mac_iveiv_entry { #define RFCSR7_RF_TUNING FIELD8(0x01) /* + * RFCSR 11: + */ +#define RFCSR11_R FIELD8(0x03) + +/* * RFCSR 12: */ #define RFCSR12_TX_POWER FIELD8(0x1f) @@ -1770,6 +1824,7 @@ struct mac_iveiv_entry { #define RFCSR17_TXMIXER_GAIN FIELD8(0x07) #define RFCSR17_TX_LO1_EN FIELD8(0x08) #define RFCSR17_R FIELD8(0x20) +#define RFCSR17_CODE FIELD8(0x7f) /* * RFCSR 20: @@ -1802,9 +1857,33 @@ struct mac_iveiv_entry { /* * RFCSR 30: */ +#define RFCSR30_TX_H20M FIELD8(0x02) +#define RFCSR30_RX_H20M FIELD8(0x04) +#define RFCSR30_RX_VCM FIELD8(0x18) #define RFCSR30_RF_CALIBRATION FIELD8(0x80) /* + * RFCSR 31: + */ +#define RFCSR31_RX_AGC_FC FIELD8(0x1f) +#define RFCSR31_RX_H20M FIELD8(0x20) + +/* + * RFCSR 38: + */ +#define RFCSR38_RX_LO1_EN FIELD8(0x20) + +/* + * RFCSR 39: + */ +#define RFCSR39_RX_LO2_EN FIELD8(0x80) + +/* + * RFCSR 49: + */ +#define RFCSR49_TX FIELD8(0x3f) + +/* * RF registers */ @@ -1837,6 +1916,11 @@ struct mac_iveiv_entry { */ /* + * Chip ID + */ +#define EEPROM_CHIP_ID 0x0000 + +/* * EEPROM Version */ #define EEPROM_VERSION 0x0001 @@ -1989,23 +2073,26 @@ struct mac_iveiv_entry { #define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) /* - * EEPROM Maximum TX power values + * EEPROM EIRP Maximum TX power values(unit: dbm) */ -#define EEPROM_MAX_TX_POWER 0x0027 -#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff) -#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00) +#define EEPROM_EIRP_MAX_TX_POWER 0x0027 +#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff) +#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00) /* * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power. 
* This is delta in 40MHZ. - * VALUE: Tx Power dalta value (MAX=4) + * VALUE: Tx Power dalta value, MAX=4(unit: dbm) * TYPE: 1: Plus the delta value, 0: minus the delta value - * TXPOWER: Enable: + * ENABLE: enable tx power compensation for 40BW */ #define EEPROM_TXPOWER_DELTA 0x0028 -#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f) -#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040) -#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080) +#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f) +#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040) +#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080) +#define EEPROM_TXPOWER_DELTA_VALUE_5G FIELD16(0x3f00) +#define EEPROM_TXPOWER_DELTA_TYPE_5G FIELD16(0x4000) +#define EEPROM_TXPOWER_DELTA_ENABLE_5G FIELD16(0x8000) /* * EEPROM TXPOWER 802.11BG @@ -2058,6 +2145,7 @@ struct mac_iveiv_entry { #define MCU_LED_LED_POLARITY 0x54 #define MCU_RADAR 0x60 #define MCU_BOOT_SIGNAL 0x72 +#define MCU_ANT_SELECT 0X73 #define MCU_BBP_SIGNAL 0x80 #define MCU_POWER_SAVE 0x83 @@ -2202,4 +2290,9 @@ struct mac_iveiv_entry { #define TXPOWER_A_TO_DEV(__txpower) \ clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER) +/* + * Board's maximun TX power limitation + */ +#define EIRP_MAX_TX_POWER_LIMIT 0x50 + #endif /* RT2800_H */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 54917a281398..2ee6cebb9b25 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -400,8 +400,15 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, if (rt2800_wait_csr_ready(rt2x00dev)) return -EBUSY; - if (rt2x00_is_pci(rt2x00dev)) + if (rt2x00_is_pci(rt2x00dev)) { + if (rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_register_read(rt2x00dev, AUX_CTRL, ®); + rt2x00_set_field32(®, AUX_CTRL_FORCE_PCIE_CLK, 1); + rt2x00_set_field32(®, AUX_CTRL_WAKE_PCIE_EN, 1); + rt2800_register_write(rt2x00dev, AUX_CTRL, reg); + } rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); + } /* * Disable DMA, will be reenabled later when enabling @@ -465,14 +472,15 @@ void rt2800_write_tx_data(struct queue_entry *entry, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W0_AMPDU, test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags)); - rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density); - rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop); - rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs); + rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, + txdesc->u.ht.mpdu_density); + rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->u.ht.txop); + rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->u.ht.mcs); rt2x00_set_field32(&word, TXWI_W0_BW, test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W0_SHORT_GI, test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags)); - rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc); + rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->u.ht.stbc); rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode); rt2x00_desc_write(txwi, 0, word); @@ -481,7 +489,7 @@ void rt2800_write_tx_data(struct queue_entry *entry, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W1_NSEQ, test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); - rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size); + rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size); rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 
txdesc->key_idx : 0xff); @@ -674,7 +682,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status) * confuse the rate control algortihm by providing clearly wrong * data. */ - if (aggr == 1 && ampdu == 0 && real_mcs != mcs) { + if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) { skbdesc->tx_rate_idx = real_mcs; mcs = real_mcs; } @@ -744,7 +752,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev) if (pid >= QID_RX) continue; - queue = rt2x00queue_get_queue(rt2x00dev, pid); + queue = rt2x00queue_get_tx_queue(rt2x00dev, pid); if (unlikely(!queue)) continue; @@ -773,13 +781,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); unsigned int beacon_base; unsigned int padding_len; - u32 reg; + u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + orig_reg = reg; rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); @@ -810,7 +819,14 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) * Write entire beacon with TXWI and padding to register. */ padding_len = roundup(entry->skb->len, 4) - entry->skb->len; - skb_pad(entry->skb, padding_len); + if (padding_len && skb_pad(entry->skb, padding_len)) { + ERROR(rt2x00dev, "Failure padding beacon, aborting\n"); + /* skb freed by skb_pad() on failure */ + entry->skb = NULL; + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg); + return; + } + beacon_base = HW_BEACON_OFFSET(entry->entry_idx); rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, entry->skb->len + padding_len); @@ -818,8 +834,6 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) /* * Enable beaconing again. */ - rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); - rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 1); rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 1); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); @@ -831,8 +845,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) } EXPORT_SYMBOL_GPL(rt2800_write_beacon); -static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev, - unsigned int beacon_base) +static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev, + unsigned int beacon_base) { int i; @@ -845,6 +859,33 @@ static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev, rt2800_register_write(rt2x00dev, beacon_base + i, 0); } +void rt2800_clear_beacon(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + u32 reg; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + /* + * Clear beacon. + */ + rt2800_clear_beacon_register(rt2x00dev, + HW_BEACON_OFFSET(entry->entry_idx)); + + /* + * Enabled beaconing again. 
+ */ + rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 1); + rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); +} +EXPORT_SYMBOL_GPL(rt2800_clear_beacon); + #ifdef CONFIG_RT2X00_LIB_DEBUGFS const struct rt2x00debug rt2800_rt2x00debug = { .owner = THIS_MODULE, @@ -1005,7 +1046,7 @@ static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev, memset(&wcid_entry, 0, sizeof(wcid_entry)); if (crypto->cmd == SET_KEY) - memcpy(&wcid_entry, crypto->address, ETH_ALEN); + memcpy(wcid_entry.mac, crypto->address, ETH_ALEN); rt2800_register_multiwrite(rt2x00dev, offset, &wcid_entry, sizeof(wcid_entry)); } @@ -1060,27 +1101,44 @@ int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, } EXPORT_SYMBOL_GPL(rt2800_config_shared_key); +static inline int rt2800_find_pairwise_keyslot(struct rt2x00_dev *rt2x00dev) +{ + int idx; + u32 offset, reg; + + /* + * Search for the first free pairwise key entry and return the + * corresponding index. + * + * Make sure the WCID starts _after_ the last possible shared key + * entry (>32). + * + * Since parts of the pairwise key table might be shared with + * the beacon frame buffers 6 & 7 we should only write into the + * first 222 entries. + */ + for (idx = 33; idx <= 222; idx++) { + offset = MAC_WCID_ATTR_ENTRY(idx); + rt2800_register_read(rt2x00dev, offset, ®); + if (!reg) + return idx; + } + return -1; +} + int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct hw_key_entry key_entry; u32 offset; + int idx; if (crypto->cmd == SET_KEY) { - /* - * 1 pairwise key is possible per AID, this means that the AID - * equals our hw_key_idx. Make sure the WCID starts _after_ the - * last possible shared key entry. - * - * Since parts of the pairwise key table might be shared with - * the beacon frame buffers 6 & 7 we should only write into the - * first 222 entries. - */ - if (crypto->aid > (222 - 32)) + idx = rt2800_find_pairwise_keyslot(rt2x00dev); + if (idx < 0) return -ENOSPC; - - key->hw_key_idx = 32 + crypto->aid; + key->hw_key_idx = idx; memcpy(key_entry.key, crypto->key, sizeof(key_entry.key)); @@ -1155,29 +1213,11 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, if (flags & CONFIG_UPDATE_TYPE) { /* - * Clear current synchronisation setup. - */ - rt2800_clear_beacon(rt2x00dev, - HW_BEACON_OFFSET(intf->beacon->entry_idx)); - /* * Enable synchronisation. */ rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); - rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); rt2x00_set_field32(®, BCN_TIME_CFG_TSF_SYNC, conf->sync); - rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, - (conf->sync == TSF_SYNC_ADHOC || - conf->sync == TSF_SYNC_AP_NONE)); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); - - /* - * Enable pre tbtt interrupt for beaconing modes - */ - rt2800_register_read(rt2x00dev, INT_TIMER_EN, ®); - rt2x00_set_field32(®, INT_TIMER_EN_PRE_TBTT_TIMER, - (conf->sync == TSF_SYNC_AP_NONE)); - rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg); - } if (flags & CONFIG_UPDATE_MAC) { @@ -1361,10 +1401,32 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, } EXPORT_SYMBOL_GPL(rt2800_config_erp); +static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev, + enum antenna ant) +{ + u32 reg; + u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0; + u8 gpio_bit3 = (ant == ANTENNA_A) ? 
0 : 1; + + if (rt2x00_is_pci(rt2x00dev)) { + rt2800_register_read(rt2x00dev, E2PROM_CSR, ®); + rt2x00_set_field32(®, E2PROM_CSR_DATA_CLOCK, eesk_pin); + rt2800_register_write(rt2x00dev, E2PROM_CSR, reg); + } else if (rt2x00_is_usb(rt2x00dev)) + rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff, + eesk_pin, 0); + + rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, ®); + rt2x00_set_field32(®, GPIO_CTRL_CFG_GPIOD_BIT3, 0); + rt2x00_set_field32(®, GPIO_CTRL_CFG_BIT3, gpio_bit3); + rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); +} + void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r1; u8 r3; + u16 eeprom; rt2800_bbp_read(rt2x00dev, 1, &r1); rt2800_bbp_read(rt2x00dev, 3, &r3); @@ -1372,7 +1434,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) /* * Configure the TX antenna. */ - switch ((int)ant->tx) { + switch (ant->tx_chain_num) { case 1: rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); break; @@ -1387,8 +1449,18 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) /* * Configure the RX antenna. */ - switch ((int)ant->rx) { + switch (ant->rx_chain_num) { case 1: + if (rt2x00_rt(rt2x00dev, RT3070) || + rt2x00_rt(rt2x00dev, RT3090) || + rt2x00_rt(rt2x00dev, RT3390)) { + rt2x00_eeprom_read(rt2x00dev, + EEPROM_NIC_CONF1, &eeprom); + if (rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_ANT_DIVERSITY)) + rt2800_set_ant_diversity(rt2x00dev, + rt2x00dev->default_ant.rx); + } rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0); break; case 2: @@ -1434,13 +1506,13 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev, { rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); - if (rt2x00dev->default_ant.tx == 1) + if (rt2x00dev->default_ant.tx_chain_num == 1) rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1); - if (rt2x00dev->default_ant.rx == 1) { + if (rt2x00dev->default_ant.rx_chain_num == 1) { rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1); rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); - } else if (rt2x00dev->default_ant.rx == 2) + } else if (rt2x00dev->default_ant.rx_chain_num == 2) rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); if (rf->channel > 14) { @@ -1526,6 +1598,105 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); } + +#define RT5390_POWER_BOUND 0x27 +#define RT5390_FREQ_OFFSET_BOUND 0x5f + +static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, + struct ieee80211_conf *conf, + struct rf_channel *rf, + struct channel_info *info) +{ + u8 rfcsr; + u16 eeprom; + + rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); + rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); + rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2); + rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr); + if (info->default_power1 > RT5390_POWER_BOUND) + rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND); + else + rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); + rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); + rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); + rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); + rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); + rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); + if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND) + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, + 
RT5390_FREQ_OFFSET_BOUND); + else + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset); + rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + if (rf->channel <= 14) { + int idx = rf->channel-1; + + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) { + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { + /* r55/r59 value array of channel 1~14 */ + static const char r55_bt_rev[] = {0x83, 0x83, + 0x83, 0x73, 0x73, 0x63, 0x53, 0x53, + 0x53, 0x43, 0x43, 0x43, 0x43, 0x43}; + static const char r59_bt_rev[] = {0x0e, 0x0e, + 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07}; + + rt2800_rfcsr_write(rt2x00dev, 55, + r55_bt_rev[idx]); + rt2800_rfcsr_write(rt2x00dev, 59, + r59_bt_rev[idx]); + } else { + static const char r59_bt[] = {0x8b, 0x8b, 0x8b, + 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89, + 0x88, 0x88, 0x86, 0x85, 0x84}; + + rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]); + } + } else { + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { + static const char r55_nonbt_rev[] = {0x23, 0x23, + 0x23, 0x23, 0x13, 0x13, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}; + static const char r59_nonbt_rev[] = {0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x06, 0x05, 0x04, 0x04}; + + rt2800_rfcsr_write(rt2x00dev, 55, + r55_nonbt_rev[idx]); + rt2800_rfcsr_write(rt2x00dev, 59, + r59_nonbt_rev[idx]); + } else if (rt2x00_rt(rt2x00dev, RT5390)) { + static const char r59_non_bt[] = {0x8f, 0x8f, + 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, + 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; + + rt2800_rfcsr_write(rt2x00dev, 59, + r59_non_bt[idx]); + } + } + } + + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0); + rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); + rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); +} + static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, @@ -1550,6 +1721,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2x00_rf(rt2x00dev, RF3052) || rt2x00_rf(rt2x00dev, RF3320)) rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); + else if (rt2x00_rf(rt2x00dev, RF5390)) + rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); else rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); @@ -1562,12 +1735,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_bbp_write(rt2x00dev, 86, 0); if (rf->channel <= 14) { - if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { - rt2800_bbp_write(rt2x00dev, 82, 0x62); - rt2800_bbp_write(rt2x00dev, 75, 0x46); - } else { - rt2800_bbp_write(rt2x00dev, 82, 0x84); - rt2800_bbp_write(rt2x00dev, 75, 0x50); + if (!rt2x00_rt(rt2x00dev, RT5390)) { + if (test_bit(CONFIG_EXTERNAL_LNA_BG, + &rt2x00dev->flags)) { + rt2800_bbp_write(rt2x00dev, 82, 0x62); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + } else { + rt2800_bbp_write(rt2x00dev, 82, 0x84); + rt2800_bbp_write(rt2x00dev, 75, 0x50); + } } } else { rt2800_bbp_write(rt2x00dev, 82, 0xf2); @@ -1587,13 +1763,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, tx_pin = 0; /* Turn on unused PA or LNA when not using 1T or 1R */ - if (rt2x00dev->default_ant.tx != 1) { + if (rt2x00dev->default_ant.tx_chain_num == 2) { rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1); rt2x00_set_field32(&tx_pin, 
TX_PIN_CFG_PA_PE_G1_EN, 1); } /* Turn on unused PA or LNA when not using 1T or 1R */ - if (rt2x00dev->default_ant.rx != 1) { + if (rt2x00dev->default_ant.rx_chain_num == 2) { rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); } @@ -1637,30 +1813,116 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, ®); } +static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev, + enum ieee80211_band band) +{ + u16 eeprom; + u8 comp_en; + u8 comp_type; + int comp_value; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom); + + if (eeprom == 0xffff) + return 0; + + if (band == IEEE80211_BAND_2GHZ) { + comp_en = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_DELTA_ENABLE_2G); + if (comp_en) { + comp_type = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_DELTA_TYPE_2G); + comp_value = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_DELTA_VALUE_2G); + if (!comp_type) + comp_value = -comp_value; + } + } else { + comp_en = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_DELTA_ENABLE_5G); + if (comp_en) { + comp_type = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_DELTA_TYPE_5G); + comp_value = rt2x00_get_field16(eeprom, + EEPROM_TXPOWER_DELTA_VALUE_5G); + if (!comp_type) + comp_value = -comp_value; + } + } + + return comp_value; +} + +static u8 rt2800_compesate_txpower(struct rt2x00_dev *rt2x00dev, + int is_rate_b, + enum ieee80211_band band, + int power_level, + u8 txpower) +{ + u32 reg; + u16 eeprom; + u8 criterion; + u8 eirp_txpower; + u8 eirp_txpower_criterion; + u8 reg_limit; + int bw_comp = 0; + + if (!((band == IEEE80211_BAND_5GHZ) && is_rate_b)) + return txpower; + + if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) + bw_comp = rt2800_get_txpower_bw_comp(rt2x00dev, band); + + if (test_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags)) { + /* + * Check if eirp txpower exceed txpower_limit. + * We use OFDM 6M as criterion and its eirp txpower + * is stored at EEPROM_EIRP_MAX_TX_POWER. + * .11b data rate need add additional 4dbm + * when calculating eirp txpower. + */ + rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, ®); + criterion = rt2x00_get_field32(reg, TX_PWR_CFG_0_6MBS); + + rt2x00_eeprom_read(rt2x00dev, + EEPROM_EIRP_MAX_TX_POWER, &eeprom); + + if (band == IEEE80211_BAND_2GHZ) + eirp_txpower_criterion = rt2x00_get_field16(eeprom, + EEPROM_EIRP_MAX_TX_POWER_2GHZ); + else + eirp_txpower_criterion = rt2x00_get_field16(eeprom, + EEPROM_EIRP_MAX_TX_POWER_5GHZ); + + eirp_txpower = eirp_txpower_criterion + (txpower - criterion) + + (is_rate_b ? 4 : 0) + bw_comp; + + reg_limit = (eirp_txpower > power_level) ? + (eirp_txpower - power_level) : 0; + } else + reg_limit = 0; + + return txpower + bw_comp - reg_limit; +} + static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, - const int max_txpower) + struct ieee80211_conf *conf) { u8 txpower; - u8 max_value = (u8)max_txpower; u16 eeprom; - int i; + int i, is_rate_b; u32 reg; u8 r1; u32 offset; + enum ieee80211_band band = conf->channel->band; + int power_level = conf->power_level; /* - * set to normal tx power mode: +/- 0dBm + * set to normal bbp tx power control mode: +/- 0dBm */ rt2800_bbp_read(rt2x00dev, 1, &r1); - rt2x00_set_field8(&r1, BBP1_TX_POWER, 0); + rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, 0); rt2800_bbp_write(rt2x00dev, 1, r1); - - /* - * The eeprom contains the tx power values for each rate. These - * values map to 100% tx power. 
Each 16bit word contains four tx - * power values and the order is the same as used in the TX_PWR_CFG - * registers. - */ offset = TX_PWR_CFG_0; for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { @@ -1674,73 +1936,99 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i, &eeprom); - /* TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS, + is_rate_b = i ? 0 : 1; + /* + * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS, * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12, - * TX_PWR_CFG_4: unknown */ + * TX_PWR_CFG_4: unknown + */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); - rt2x00_set_field32(®, TX_PWR_CFG_RATE0, - min(txpower, max_value)); + txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, + power_level, txpower); + rt2x00_set_field32(®, TX_PWR_CFG_RATE0, txpower); - /* TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS, + /* + * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS, * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13, - * TX_PWR_CFG_4: unknown */ + * TX_PWR_CFG_4: unknown + */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); - rt2x00_set_field32(®, TX_PWR_CFG_RATE1, - min(txpower, max_value)); + txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, + power_level, txpower); + rt2x00_set_field32(®, TX_PWR_CFG_RATE1, txpower); - /* TX_PWR_CFG_0: 55MBS, TX_PWR_CFG_1: 48MBS, + /* + * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS, * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14, - * TX_PWR_CFG_4: unknown */ + * TX_PWR_CFG_4: unknown + */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); - rt2x00_set_field32(®, TX_PWR_CFG_RATE2, - min(txpower, max_value)); + txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, + power_level, txpower); + rt2x00_set_field32(®, TX_PWR_CFG_RATE2, txpower); - /* TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS, + /* + * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS, * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15, - * TX_PWR_CFG_4: unknown */ + * TX_PWR_CFG_4: unknown + */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); - rt2x00_set_field32(®, TX_PWR_CFG_RATE3, - min(txpower, max_value)); + txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, + power_level, txpower); + rt2x00_set_field32(®, TX_PWR_CFG_RATE3, txpower); /* read the next four txpower values */ rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1, &eeprom); - /* TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0, + is_rate_b = 0; + /* + * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0, * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown, - * TX_PWR_CFG_4: unknown */ + * TX_PWR_CFG_4: unknown + */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); - rt2x00_set_field32(®, TX_PWR_CFG_RATE4, - min(txpower, max_value)); + txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, + power_level, txpower); + rt2x00_set_field32(®, TX_PWR_CFG_RATE4, txpower); - /* TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1, + /* + * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1, * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown, - * TX_PWR_CFG_4: unknown */ + * TX_PWR_CFG_4: unknown + */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); - rt2x00_set_field32(®, TX_PWR_CFG_RATE5, - min(txpower, max_value)); + txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, + power_level, txpower); + rt2x00_set_field32(®, TX_PWR_CFG_RATE5, txpower); - /* TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2, + /* + * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2, * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown, - * TX_PWR_CFG_4: unknown */ + * 
TX_PWR_CFG_4: unknown + */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); - rt2x00_set_field32(®, TX_PWR_CFG_RATE6, - min(txpower, max_value)); + txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, + power_level, txpower); + rt2x00_set_field32(®, TX_PWR_CFG_RATE6, txpower); - /* TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3, + /* + * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3, * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown, - * TX_PWR_CFG_4: unknown */ + * TX_PWR_CFG_4: unknown + */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); - rt2x00_set_field32(®, TX_PWR_CFG_RATE7, - min(txpower, max_value)); + txpower = rt2800_compesate_txpower(rt2x00dev, is_rate_b, band, + power_level, txpower); + rt2x00_set_field32(®, TX_PWR_CFG_RATE7, txpower); rt2800_register_write(rt2x00dev, offset, reg); @@ -1799,11 +2087,13 @@ void rt2800_config(struct rt2x00_dev *rt2x00dev, /* Always recalculate LNA gain before changing configuration */ rt2800_config_lna_gain(rt2x00dev, libconf); - if (flags & IEEE80211_CONF_CHANGE_CHANNEL) + if (flags & IEEE80211_CONF_CHANGE_CHANNEL) { rt2800_config_channel(rt2x00dev, libconf->conf, &libconf->rf, &libconf->channel); + rt2800_config_txpower(rt2x00dev, libconf->conf); + } if (flags & IEEE80211_CONF_CHANGE_POWER) - rt2800_config_txpower(rt2x00dev, libconf->conf->power_level); + rt2800_config_txpower(rt2x00dev, libconf->conf); if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) rt2800_config_retry_limit(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_PS) @@ -1832,7 +2122,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390)) + rt2x00_rt(rt2x00dev, RT3390) || + rt2x00_rt(rt2x00dev, RT5390)) return 0x1c + (2 * rt2x00dev->lna_gain); else return 0x2e + rt2x00dev->lna_gain; @@ -1964,6 +2255,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f); + } else if (rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); + rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); @@ -2032,7 +2327,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, CCK_PROT_CFG, ®); rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_RATE, 3); rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_CTRL, 0); - rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1); rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1); @@ -2045,7 +2340,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, ®); rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_RATE, 3); rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_CTRL, 0); - rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1); rt2x00_set_field32(®, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, 
OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1); @@ -2058,7 +2353,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, MM20_PROT_CFG, ®); rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_RATE, 0x4004); rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_CTRL, 0); - rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1); rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1); @@ -2071,7 +2366,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, MM40_PROT_CFG, ®); rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_RATE, 0x4084); rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_CTRL, 0); - rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1); rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1); @@ -2084,7 +2379,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, GF20_PROT_CFG, ®); rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_RATE, 0x4004); rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_CTRL, 0); - rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1); rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1); @@ -2097,7 +2392,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, GF40_PROT_CFG, ®); rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_RATE, 0x4084); rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_CTRL, 0); - rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_NAV, 1); + rt2x00_set_field32(®, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1); rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(®, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1); @@ -2180,26 +2475,30 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i), wcid, sizeof(wcid)); - rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1); + rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 0); rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); } /* * Clear all beacons */ - rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE0); - rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE1); - rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE2); - rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE3); - rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE4); - rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE5); - rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE6); - rt2800_clear_beacon(rt2x00dev, HW_BEACON_BASE7); + rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0); + rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1); + rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2); + rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3); + rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4); + rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5); + rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6); + rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7); if (rt2x00_is_usb(rt2x00dev)) { rt2800_register_read(rt2x00dev, US_CYC_CNT, ®); rt2x00_set_field32(®, 
US_CYC_CNT_CLOCK_CYCLE, 30); rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); + } else if (rt2x00_is_pcie(rt2x00dev)) { + rt2800_register_read(rt2x00dev, US_CYC_CNT, ®); + rt2x00_set_field32(®, US_CYC_CNT_CLOCK_CYCLE, 125); + rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); } rt2800_register_read(rt2x00dev, HT_FBK_CFG0, ®); @@ -2335,15 +2634,31 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) rt2800_wait_bbp_ready(rt2x00dev))) return -EACCES; - if (rt2800_is_305x_soc(rt2x00dev)) + if (rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_bbp_read(rt2x00dev, 4, &value); + rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1); + rt2800_bbp_write(rt2x00dev, 4, value); + } + + if (rt2800_is_305x_soc(rt2x00dev) || + rt2x00_rt(rt2x00dev, RT5390)) rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); + if (rt2x00_rt(rt2x00dev, RT5390)) + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { rt2800_bbp_write(rt2x00dev, 69, 0x16); rt2800_bbp_write(rt2x00dev, 73, 0x12); + } else if (rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); } else { rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x10); @@ -2354,7 +2669,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390)) { + rt2x00_rt(rt2x00dev, RT3390) || + rt2x00_rt(rt2x00dev, RT5390)) { rt2800_bbp_write(rt2x00dev, 79, 0x13); rt2800_bbp_write(rt2x00dev, 80, 0x05); rt2800_bbp_write(rt2x00dev, 81, 0x33); @@ -2366,35 +2682,62 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) } rt2800_bbp_write(rt2x00dev, 82, 0x62); - rt2800_bbp_write(rt2x00dev, 83, 0x6a); + if (rt2x00_rt(rt2x00dev, RT5390)) + rt2800_bbp_write(rt2x00dev, 83, 0x7a); + else + rt2800_bbp_write(rt2x00dev, 83, 0x6a); if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) rt2800_bbp_write(rt2x00dev, 84, 0x19); + else if (rt2x00_rt(rt2x00dev, RT5390)) + rt2800_bbp_write(rt2x00dev, 84, 0x9a); else rt2800_bbp_write(rt2x00dev, 84, 0x99); - rt2800_bbp_write(rt2x00dev, 86, 0x00); + if (rt2x00_rt(rt2x00dev, RT5390)) + rt2800_bbp_write(rt2x00dev, 86, 0x38); + else + rt2800_bbp_write(rt2x00dev, 86, 0x00); + rt2800_bbp_write(rt2x00dev, 91, 0x04); - rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt(rt2x00dev, RT5390)) + rt2800_bbp_write(rt2x00dev, 92, 0x02); + else + rt2800_bbp_write(rt2x00dev, 92, 0x00); if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || + rt2x00_rt(rt2x00dev, RT5390) || rt2800_is_305x_soc(rt2x00dev)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); else rt2800_bbp_write(rt2x00dev, 103, 0x00); + if (rt2x00_rt(rt2x00dev, RT5390)) + rt2800_bbp_write(rt2x00dev, 104, 0x92); + if (rt2800_is_305x_soc(rt2x00dev)) rt2800_bbp_write(rt2x00dev, 105, 0x01); + else if (rt2x00_rt(rt2x00dev, RT5390)) + rt2800_bbp_write(rt2x00dev, 105, 0x3c); else rt2800_bbp_write(rt2x00dev, 105, 0x05); - rt2800_bbp_write(rt2x00dev, 106, 0x35); + + if (rt2x00_rt(rt2x00dev, RT5390)) + rt2800_bbp_write(rt2x00dev, 106, 0x03); + else + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + if (rt2x00_rt(rt2x00dev, RT5390)) + 
rt2800_bbp_write(rt2x00dev, 128, 0x12); if (rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390)) { + rt2x00_rt(rt2x00dev, RT3390) || + rt2x00_rt(rt2x00dev, RT5390)) { rt2800_bbp_read(rt2x00dev, 138, &value); rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); @@ -2406,6 +2749,42 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) rt2800_bbp_write(rt2x00dev, 138, value); } + if (rt2x00_rt(rt2x00dev, RT5390)) { + int ant, div_mode; + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 1 : 0; + + /* check if this is a Bluetooth combo card */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) { + u32 reg; + + rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, ®); + rt2x00_set_field32(®, GPIO_CTRL_CFG_GPIOD_BIT3, 0); + rt2x00_set_field32(®, GPIO_CTRL_CFG_GPIOD_BIT6, 0); + rt2x00_set_field32(®, GPIO_CTRL_CFG_BIT3, 0); + rt2x00_set_field32(®, GPIO_CTRL_CFG_BIT6, 0); + if (ant == 0) + rt2x00_set_field32(®, GPIO_CTRL_CFG_BIT3, 1); + else if (ant == 1) + rt2x00_set_field32(®, GPIO_CTRL_CFG_BIT6, 1); + rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); + } + + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + else + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); + rt2800_bbp_write(rt2x00dev, 152, value); + + /* Init frequency calibration */ + rt2800_bbp_write(rt2x00dev, 142, 1); + rt2800_bbp_write(rt2x00dev, 143, 57); + } for (i = 0; i < EEPROM_BBP_SIZE; i++) { rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); @@ -2436,6 +2815,10 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40); rt2800_bbp_write(rt2x00dev, 4, bbp); + rt2800_rfcsr_read(rt2x00dev, 31, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40); + rt2800_rfcsr_write(rt2x00dev, 31, rfcsr); + rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1); rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); @@ -2491,18 +2874,28 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) !rt2x00_rt(rt2x00dev, RT3071) && !rt2x00_rt(rt2x00dev, RT3090) && !rt2x00_rt(rt2x00dev, RT3390) && + !rt2x00_rt(rt2x00dev, RT5390) && !rt2800_is_305x_soc(rt2x00dev)) return 0; /* * Init RF calibration. 
*/ - rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); - rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); - rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); - msleep(1); - rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); - rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + if (rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); + rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); + msleep(1); + rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0); + rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); + } else { + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + msleep(1); + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + } if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071) || @@ -2510,7 +2903,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 4, 0x40); rt2800_rfcsr_write(rt2x00dev, 5, 0x03); rt2800_rfcsr_write(rt2x00dev, 6, 0x02); - rt2800_rfcsr_write(rt2x00dev, 7, 0x70); + rt2800_rfcsr_write(rt2x00dev, 7, 0x60); rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); rt2800_rfcsr_write(rt2x00dev, 10, 0x41); rt2800_rfcsr_write(rt2x00dev, 11, 0x21); @@ -2593,6 +2986,87 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 30, 0x00); rt2800_rfcsr_write(rt2x00dev, 31, 0x00); return 0; + } else if (rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); + rt2800_rfcsr_write(rt2x00dev, 2, 0x80); + rt2800_rfcsr_write(rt2x00dev, 3, 0x88); + rt2800_rfcsr_write(rt2x00dev, 5, 0x10); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); + else + rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); + rt2800_rfcsr_write(rt2x00dev, 7, 0x00); + rt2800_rfcsr_write(rt2x00dev, 10, 0x53); + rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); + rt2800_rfcsr_write(rt2x00dev, 12, 0xc6); + rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); + rt2800_rfcsr_write(rt2x00dev, 14, 0x00); + rt2800_rfcsr_write(rt2x00dev, 15, 0x00); + rt2800_rfcsr_write(rt2x00dev, 16, 0x00); + rt2800_rfcsr_write(rt2x00dev, 18, 0x03); + rt2800_rfcsr_write(rt2x00dev, 19, 0x00); + + rt2800_rfcsr_write(rt2x00dev, 20, 0x00); + rt2800_rfcsr_write(rt2x00dev, 21, 0x00); + rt2800_rfcsr_write(rt2x00dev, 22, 0x20); + rt2800_rfcsr_write(rt2x00dev, 23, 0x00); + rt2800_rfcsr_write(rt2x00dev, 24, 0x00); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 25, 0x80); + else + rt2800_rfcsr_write(rt2x00dev, 25, 0xc0); + rt2800_rfcsr_write(rt2x00dev, 26, 0x00); + rt2800_rfcsr_write(rt2x00dev, 27, 0x09); + rt2800_rfcsr_write(rt2x00dev, 28, 0x00); + rt2800_rfcsr_write(rt2x00dev, 29, 0x10); + + rt2800_rfcsr_write(rt2x00dev, 30, 0x00); + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); + rt2800_rfcsr_write(rt2x00dev, 32, 0x80); + rt2800_rfcsr_write(rt2x00dev, 33, 0x00); + rt2800_rfcsr_write(rt2x00dev, 34, 0x07); + rt2800_rfcsr_write(rt2x00dev, 35, 0x12); + rt2800_rfcsr_write(rt2x00dev, 36, 0x00); + rt2800_rfcsr_write(rt2x00dev, 37, 0x08); + rt2800_rfcsr_write(rt2x00dev, 38, 0x85); + rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); + else + rt2800_rfcsr_write(rt2x00dev, 40, 0x4b); + rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); + rt2800_rfcsr_write(rt2x00dev, 42, 0xd2); + rt2800_rfcsr_write(rt2x00dev, 43, 0x9a); + rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); + 
rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 46, 0x73); + else + rt2800_rfcsr_write(rt2x00dev, 46, 0x7b); + rt2800_rfcsr_write(rt2x00dev, 47, 0x00); + rt2800_rfcsr_write(rt2x00dev, 48, 0x10); + rt2800_rfcsr_write(rt2x00dev, 49, 0x94); + + rt2800_rfcsr_write(rt2x00dev, 52, 0x38); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 53, 0x00); + else + rt2800_rfcsr_write(rt2x00dev, 53, 0x84); + rt2800_rfcsr_write(rt2x00dev, 54, 0x78); + rt2800_rfcsr_write(rt2x00dev, 55, 0x44); + rt2800_rfcsr_write(rt2x00dev, 56, 0x22); + rt2800_rfcsr_write(rt2x00dev, 57, 0x80); + rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); + rt2800_rfcsr_write(rt2x00dev, 59, 0x63); + + rt2800_rfcsr_write(rt2x00dev, 60, 0x45); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) + rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); + else + rt2800_rfcsr_write(rt2x00dev, 61, 0xdd); + rt2800_rfcsr_write(rt2x00dev, 62, 0x00); + rt2800_rfcsr_write(rt2x00dev, 63, 0x00); } if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { @@ -2602,12 +3076,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, LDO_CFG0, reg); } else if (rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090)) { + rt2800_rfcsr_write(rt2x00dev, 31, 0x14); + rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr); rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1); rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); - rt2800_rfcsr_write(rt2x00dev, 31, 0x14); - rt2800_register_read(rt2x00dev, LDO_CFG0, ®); rt2x00_set_field32(®, LDO_CFG0_BGSEL, 1); if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || @@ -2619,6 +3093,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, LDO_CFG0_LDO_CORE_VLEVEL, 0); } rt2800_register_write(rt2x00dev, LDO_CFG0, reg); + + rt2800_register_read(rt2x00dev, GPIO_SWITCH, ®); + rt2x00_set_field32(®, GPIO_SWITCH_5, 0); + rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg); } else if (rt2x00_rt(rt2x00dev, RT3390)) { rt2800_register_read(rt2x00dev, GPIO_SWITCH, ®); rt2x00_set_field32(®, GPIO_SWITCH_5, 0); @@ -2642,21 +3120,23 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15); } - /* - * Set back to initial state - */ - rt2800_bbp_write(rt2x00dev, 24, 0); + if (!rt2x00_rt(rt2x00dev, RT5390)) { + /* + * Set back to initial state + */ + rt2800_bbp_write(rt2x00dev, 24, 0); - rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); - rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0); - rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); + rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0); + rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); - /* - * set BBP back to BW20 - */ - rt2800_bbp_read(rt2x00dev, 4, &bbp); - rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); - rt2800_bbp_write(rt2x00dev, 4, bbp); + /* + * Set BBP back to BW20 + */ + rt2800_bbp_read(rt2x00dev, 4, &bbp); + rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); + rt2800_bbp_write(rt2x00dev, 4, bbp); + } if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || @@ -2668,24 +3148,29 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, OPT_14_CSR_BIT0, 1); rt2800_register_write(rt2x00dev, OPT_14_CSR, reg); - rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); - rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); - if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || - 
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || - rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { - if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) - rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); - } - rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); - if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1) - rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, - rt2x00_get_field16(eeprom, - EEPROM_TXMIXER_GAIN_BG_VAL)); - rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); + if (!rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); + if (rt2x00_rt(rt2x00dev, RT3070) || + rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || + rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || + rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { + if (!test_bit(CONFIG_EXTERNAL_LNA_BG, + &rt2x00dev->flags)) + rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); + } + rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1) + rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, + rt2x00_get_field16(eeprom, + EEPROM_TXMIXER_GAIN_BG_VAL)); + rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); + } if (rt2x00_rt(rt2x00dev, RT3090)) { rt2800_bbp_read(rt2x00dev, 138, &bbp); + /* Turn off unused DAC1 and ADC1 to reduce power consumption */ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0); @@ -2719,10 +3204,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); } - if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) { + if (rt2x00_rt(rt2x00dev, RT3070)) { rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr); - if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || - rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E)) + if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3); else rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0); @@ -2732,6 +3216,20 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_rfcsr_write(rt2x00dev, 27, rfcsr); } + if (rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0); + rt2800_rfcsr_write(rt2x00dev, 38, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0); + rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); + + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); + rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2); + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); + } + return 0; } @@ -2810,10 +3308,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, ®); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); rt2x00_set_field32(®, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); - rt2x00_set_field32(®, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); /* Wait for DMA, ignore error */ @@ -2823,9 +3318,6 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_TX, 0); rt2x00_set_field32(®, MAC_SYS_CTRL_ENABLE_RX, 0); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); - - rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); - rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); } EXPORT_SYMBOL_GPL(rt2800_disable_radio); @@ 
-2986,13 +3478,6 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) default_lna_gain); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); - rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word); - if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff) - rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER); - if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff) - rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER); - rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word); - return 0; } EXPORT_SYMBOL_GPL(rt2800_validate_eeprom); @@ -3009,10 +3494,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); /* - * Identify RF chipset. + * Identify RF chipset by EEPROM value + * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field + * RT53xx: defined in "EEPROM_CHIP_ID" field */ - value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); rt2800_register_read(rt2x00dev, MAC_CSR0, ®); + if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390) + rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value); + else + value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); @@ -3024,7 +3514,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) !rt2x00_rt(rt2x00dev, RT3071) && !rt2x00_rt(rt2x00dev, RT3090) && !rt2x00_rt(rt2x00dev, RT3390) && - !rt2x00_rt(rt2x00dev, RT3572)) { + !rt2x00_rt(rt2x00dev, RT3572) && + !rt2x00_rt(rt2x00dev, RT5390)) { ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); return -ENODEV; } @@ -3038,7 +3529,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) !rt2x00_rf(rt2x00dev, RF3021) && !rt2x00_rf(rt2x00dev, RF3022) && !rt2x00_rf(rt2x00dev, RF3052) && - !rt2x00_rf(rt2x00dev, RF3320)) { + !rt2x00_rf(rt2x00dev, RF3320) && + !rt2x00_rf(rt2x00dev, RF5390)) { ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); return -ENODEV; } @@ -3046,11 +3538,35 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) /* * Identify default antenna configuration. */ - rt2x00dev->default_ant.tx = + rt2x00dev->default_ant.tx_chain_num = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH); - rt2x00dev->default_ant.rx = + rt2x00dev->default_ant.rx_chain_num = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + + if (rt2x00_rt(rt2x00dev, RT3070) || + rt2x00_rt(rt2x00dev, RT3090) || + rt2x00_rt(rt2x00dev, RT3390)) { + value = rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_ANT_DIVERSITY); + switch (value) { + case 0: + case 1: + case 2: + rt2x00dev->default_ant.tx = ANTENNA_A; + rt2x00dev->default_ant.rx = ANTENNA_A; + break; + case 3: + rt2x00dev->default_ant.tx = ANTENNA_A; + rt2x00dev->default_ant.rx = ANTENNA_B; + break; + } + } else { + rt2x00dev->default_ant.tx = ANTENNA_A; + rt2x00dev->default_ant.rx = ANTENNA_A; + } + /* * Read frequency offset and RF programming sequence. */ @@ -3084,6 +3600,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg); #endif /* CONFIG_RT2X00_LIB_LEDS */ + /* + * Check if support EIRP tx power limit feature. 
+ */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom); + + if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) < + EIRP_MAX_TX_POWER_LIMIT) + __set_bit(CONFIG_SUPPORT_POWER_LIMIT, &rt2x00dev->flags); + return 0; } EXPORT_SYMBOL_GPL(rt2800_init_eeprom); @@ -3236,7 +3761,6 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) char *default_power1; char *default_power2; unsigned int i; - unsigned short max_power; u16 eeprom; /* @@ -3303,7 +3827,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) rt2x00_rf(rt2x00dev, RF2020) || rt2x00_rf(rt2x00dev, RF3021) || rt2x00_rf(rt2x00dev, RF3022) || - rt2x00_rf(rt2x00dev, RF3320)) { + rt2x00_rf(rt2x00dev, RF3320) || + rt2x00_rf(rt2x00dev, RF5390)) { spec->num_channels = 14; spec->channels = rf_vals_3x; } else if (rt2x00_rf(rt2x00dev, RF3052)) { @@ -3361,26 +3886,21 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->channels_info = info; - rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom); - max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ); default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); for (i = 0; i < 14; i++) { - info[i].max_power = max_power; - info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]); - info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]); + info[i].default_power1 = default_power1[i]; + info[i].default_power2 = default_power2[i]; } if (spec->num_channels > 14) { - max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ); default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); for (i = 14; i < spec->num_channels; i++) { - info[i].max_power = max_power; - info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]); - info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]); + info[i].default_power1 = default_power1[i]; + info[i].default_power2 = default_power2[i]; } } @@ -3472,7 +3992,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, if (queue_idx >= 4) return 0; - queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); /* Update WMM TXOP register */ offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2))); @@ -3530,7 +4050,8 @@ EXPORT_SYMBOL_GPL(rt2800_get_tsf); int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn) + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) { int ret = 0; diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h index e3c995a9dec4..0c92d86a36f4 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/rt2x00/rt2800lib.h @@ -156,6 +156,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev); void rt2800_txdone_entry(struct queue_entry *entry, u32 status); void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc); +void rt2800_clear_beacon(struct queue_entry *entry); extern const struct rt2x00debug rt2800_rt2x00debug; @@ -198,7 +199,8 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, u64 rt2800_get_tsf(struct ieee80211_hw *hw); int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn); + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size); 
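The extra buf_size argument carries the peer's reorder-buffer size straight into the driver callback. A minimal sketch of a handler built against this extended signature is shown below; only the mac80211 symbols are real, the function name is illustrative, and rt2800 itself does not make use of buf_size in this change.

	/* Illustrative only, not part of the patch. */
	static int example_ampdu_action(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					enum ieee80211_ampdu_mlme_action action,
					struct ieee80211_sta *sta, u16 tid,
					u16 *ssn, u8 buf_size)
	{
		switch (action) {
		case IEEE80211_AMPDU_TX_START:
			/* buf_size limits how many MPDUs may be outstanding
			 * towards this peer; a driver could size its BA
			 * window from it before acknowledging the session. */
			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
			break;
		case IEEE80211_AMPDU_TX_STOP:
			ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
			break;
		default:
			break;
		}

		return 0;
	}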
int rt2800_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey); diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 3b3f1e45ab3e..808073aa9dcc 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -200,11 +200,22 @@ static void rt2800pci_start_queue(struct data_queue *queue) rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); break; case QID_BEACON: + /* + * Allow beacon tasklets to be scheduled for periodic + * beacon updates. + */ + tasklet_enable(&rt2x00dev->tbtt_tasklet); + tasklet_enable(&rt2x00dev->pretbtt_tasklet); + rt2800_register_read(rt2x00dev, BCN_TIME_CFG, ®); rt2x00_set_field32(®, BCN_TIME_CFG_TSF_TICKING, 1); rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 1); rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 1); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + rt2800_register_read(rt2x00dev, INT_TIMER_EN, ®); + rt2x00_set_field32(®, INT_TIMER_EN_PRE_TBTT_TIMER, 1); + rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg); break; default: break; @@ -250,6 +261,16 @@ static void rt2800pci_stop_queue(struct data_queue *queue) rt2x00_set_field32(®, BCN_TIME_CFG_TBTT_ENABLE, 0); rt2x00_set_field32(®, BCN_TIME_CFG_BEACON_GEN, 0); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); + + rt2800_register_read(rt2x00dev, INT_TIMER_EN, ®); + rt2x00_set_field32(®, INT_TIMER_EN_PRE_TBTT_TIMER, 0); + rt2800_register_write(rt2x00dev, INT_TIMER_EN, reg); + + /* + * Wait for tbtt tasklets to finish. + */ + tasklet_disable(&rt2x00dev->tbtt_tasklet); + tasklet_disable(&rt2x00dev->pretbtt_tasklet); break; default: break; @@ -397,9 +418,9 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_ON) || - (state == STATE_RADIO_IRQ_ON_ISR); + int mask = (state == STATE_RADIO_IRQ_ON); u32 reg; + unsigned long flags; /* * When interrupts are being enabled, the interrupt registers @@ -408,8 +429,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, if (state == STATE_RADIO_IRQ_ON) { rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, ®); rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg); + + /* + * Enable tasklets. The beacon related tasklets are + * enabled when the beacon queue is started. + */ + tasklet_enable(&rt2x00dev->txstatus_tasklet); + tasklet_enable(&rt2x00dev->rxdone_tasklet); + tasklet_enable(&rt2x00dev->autowake_tasklet); } + spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); rt2800_register_read(rt2x00dev, INT_MASK_CSR, ®); rt2x00_set_field32(®, INT_MASK_CSR_RXDELAYINT, 0); rt2x00_set_field32(®, INT_MASK_CSR_TXDELAYINT, 0); @@ -430,6 +460,17 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(®, INT_MASK_CSR_RX_COHERENT, 0); rt2x00_set_field32(®, INT_MASK_CSR_TX_COHERENT, 0); rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); + spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); + + if (state == STATE_RADIO_IRQ_OFF) { + /* + * Ensure that all tasklets are finished before + * disabling the interrupts. 
+ */ + tasklet_disable(&rt2x00dev->txstatus_tasklet); + tasklet_disable(&rt2x00dev->rxdone_tasklet); + tasklet_disable(&rt2x00dev->autowake_tasklet); + } } static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev) @@ -452,6 +493,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); + if (rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_register_read(rt2x00dev, AUX_CTRL, ®); + rt2x00_set_field32(®, AUX_CTRL_FORCE_PCIE_CLK, 1); + rt2x00_set_field32(®, AUX_CTRL_WAKE_PCIE_EN, 1); + rt2800_register_write(rt2x00dev, AUX_CTRL, reg); + } + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, ®); @@ -475,39 +523,23 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev) { - u32 reg; - - rt2800_disable_radio(rt2x00dev); - - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280); - - rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, ®); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX0, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX1, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX2, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX3, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX4, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DTX_IDX5, 1); - rt2x00_set_field32(®, WPDMA_RST_IDX_DRX_IDX0, 1); - rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg); - - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); - rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); + if (rt2x00_is_soc(rt2x00dev)) { + rt2800_disable_radio(rt2x00dev); + rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0); + rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0); + } } static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - /* - * Always put the device to sleep (even when we intend to wakeup!) - * if the device is booting and wasn't asleep it will return - * failure when attempting to wakeup. - */ - rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2); - if (state == STATE_AWAKE) { - rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); + rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0x02); rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP); + } else if (state == STATE_SLEEP) { + rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff); + rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff); + rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0x01, 0xff, 0x01); } return 0; @@ -538,9 +570,7 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev, rt2800pci_set_state(rt2x00dev, STATE_SLEEP); break; case STATE_RADIO_IRQ_ON: - case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: - case STATE_RADIO_IRQ_OFF_ISR: rt2800pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: @@ -696,7 +726,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) { qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE); - if (qid >= QID_RX) { + if (unlikely(qid >= QID_RX)) { /* * Unknown queue, this shouldn't happen. Just drop * this tx status. @@ -706,7 +736,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) break; } - queue = rt2x00queue_get_queue(rt2x00dev, qid); + queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); if (unlikely(queue == NULL)) { /* * The queue is NULL, this shouldn't happen. 
Stop @@ -717,7 +747,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) break; } - if (rt2x00queue_empty(queue)) { + if (unlikely(rt2x00queue_empty(queue))) { /* * The queue is empty. Stop processing here * and drop the tx status. @@ -732,45 +762,59 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev) } } -static void rt2800pci_txstatus_tasklet(unsigned long data) +static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, + struct rt2x00_field32 irq_field) { - rt2800pci_txdone((struct rt2x00_dev *)data); -} - -static irqreturn_t rt2800pci_interrupt_thread(int irq, void *dev_instance) -{ - struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg = rt2x00dev->irqvalue[0]; + u32 reg; /* - * 1 - Pre TBTT interrupt. + * Enable a single interrupt. The interrupt mask register + * access needs locking. */ - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT)) - rt2x00lib_pretbtt(rt2x00dev); + spin_lock_irq(&rt2x00dev->irqmask_lock); + rt2800_register_read(rt2x00dev, INT_MASK_CSR, ®); + rt2x00_set_field32(®, irq_field, 1); + rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); + spin_unlock_irq(&rt2x00dev->irqmask_lock); +} - /* - * 2 - Beacondone interrupt. - */ - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT)) - rt2x00lib_beacondone(rt2x00dev); +static void rt2800pci_txstatus_tasklet(unsigned long data) +{ + rt2800pci_txdone((struct rt2x00_dev *)data); /* - * 3 - Rx ring done interrupt. + * No need to enable the tx status interrupt here as we always + * leave it enabled to minimize the possibility of a tx status + * register overflow. See comment in interrupt handler. */ - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) - rt2x00pci_rxdone(rt2x00dev); +} - /* - * 4 - Auto wakeup interrupt. - */ - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) - rt2800pci_wakeup(rt2x00dev); +static void rt2800pci_pretbtt_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00lib_pretbtt(rt2x00dev); + rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT); +} - /* Enable interrupts again. */ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, - STATE_RADIO_IRQ_ON_ISR); +static void rt2800pci_tbtt_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00lib_beacondone(rt2x00dev); + rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT); +} - return IRQ_HANDLED; +static void rt2800pci_rxdone_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00pci_rxdone(rt2x00dev); + rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE); +} + +static void rt2800pci_autowake_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2800pci_wakeup(rt2x00dev); + rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP); } static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) @@ -791,7 +835,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) * * Furthermore we don't disable the TX_FIFO_STATUS * interrupt here but leave it enabled so that the TX_STA_FIFO - * can also be read while the interrupt thread gets executed. + * can also be read while the tx status tasklet gets executed. * * Since we have only one producer and one consumer we don't * need to lock the kfifo. 
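A rough sketch of the producer/consumer split that the comment above relies on (illustrative, not part of the patch; it assumes the rt2x00 driver headers and the kfifo macros of this kernel generation, where kfifo_put() takes a pointer to the element):

	/* Producer: hard-irq context only, drains the hardware TX_STA_FIFO. */
	static void example_txstatus_producer(struct rt2x00_dev *rt2x00dev)
	{
		u32 status;

		rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status);
		while (rt2x00_get_field32(status, TX_STA_FIFO_VALID)) {
			if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status))
				break;	/* software fifo full, drop the report */
			rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status);
		}

		tasklet_schedule(&rt2x00dev->txstatus_tasklet);
	}

	/* Consumer: tasklet context only, so it never races with itself. */
	static void example_txstatus_consumer(struct rt2x00_dev *rt2x00dev)
	{
		u32 status;

		while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
			/* one report per pending entry of the queue named in
			 * TX_STA_FIFO_PID_QUEUE; complete that entry here */
		}
	}

With exactly one producer and one consumer the kfifo itself provides the required ordering, which is why neither side takes a spinlock.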
@@ -816,8 +860,7 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev) static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg; - irqreturn_t ret = IRQ_HANDLED; + u32 reg, mask; /* Read status and ACK all interrupts */ rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, ®); @@ -829,38 +872,44 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) - rt2800pci_txstatus_interrupt(rt2x00dev); + /* + * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits + * for interrupts and interrupt masks we can just use the value of + * INT_SOURCE_CSR to create the interrupt mask. + */ + mask = ~reg; - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT) || - rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT) || - rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE) || - rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) { + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) { + rt2800pci_txstatus_interrupt(rt2x00dev); /* - * All other interrupts are handled in the interrupt thread. - * Store irqvalue for use in the interrupt thread. + * Never disable the TX_FIFO_STATUS interrupt. */ - rt2x00dev->irqvalue[0] = reg; + rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1); + } - /* - * Disable interrupts, will be enabled again in the - * interrupt thread. - */ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, - STATE_RADIO_IRQ_OFF_ISR); + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT)) + tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet); - /* - * Leave the TX_FIFO_STATUS interrupt enabled to not lose any - * tx status reports. - */ - rt2800_register_read(rt2x00dev, INT_MASK_CSR, ®); - rt2x00_set_field32(®, INT_MASK_CSR_TX_FIFO_STATUS, 1); - rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT)) + tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); - ret = IRQ_WAKE_THREAD; - } + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) + tasklet_schedule(&rt2x00dev->rxdone_tasklet); - return ret; + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) + tasklet_schedule(&rt2x00dev->autowake_tasklet); + + /* + * Disable all interrupts for which a tasklet was scheduled right now, + * the tasklet will reenable the appropriate interrupts. + */ + spin_lock(&rt2x00dev->irqmask_lock); + rt2800_register_read(rt2x00dev, INT_MASK_CSR, ®); + reg &= mask; + rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); + spin_unlock(&rt2x00dev->irqmask_lock); + + return IRQ_HANDLED; } /* @@ -928,6 +977,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (!modparam_nohwcrypt) __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags); /* * Set the rssi offset. 
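One subtlety in the interrupt rework above is the asymmetric locking of irqmask_lock: the hard-irq handler takes a plain spin_lock() while the tasklet-side helper uses spin_lock_irq(). The helpers below are a hedged sketch of why both forms are correct in their respective contexts; they are illustrative, not part of the patch, and simply mirror the patch's own register accessors.

	/* Hard-irq context: local interrupts are already off, so a plain
	 * spin_lock() is enough to serialize against the tasklets. */
	static void example_mask_from_hardirq(struct rt2x00_dev *rt2x00dev,
					      u32 mask)
	{
		u32 reg;

		spin_lock(&rt2x00dev->irqmask_lock);
		rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
		reg &= mask;
		rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
		spin_unlock(&rt2x00dev->irqmask_lock);
	}

	/* Softirq context runs with interrupts enabled, so the lock must
	 * also block the hard-irq handler on this CPU; otherwise the ISR
	 * could preempt this read-modify-write and spin on the lock. */
	static void example_unmask_from_tasklet(struct rt2x00_dev *rt2x00dev,
						struct rt2x00_field32 irq_field)
	{
		u32 reg;

		spin_lock_irq(&rt2x00dev->irqmask_lock);
		rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
		rt2x00_set_field32(&reg, irq_field, 1);
		rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
		spin_unlock_irq(&rt2x00dev->irqmask_lock);
	}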
@@ -975,8 +1025,11 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = { static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .irq_handler = rt2800pci_interrupt, - .irq_handler_thread = rt2800pci_interrupt_thread, - .txstatus_tasklet = rt2800pci_txstatus_tasklet, + .txstatus_tasklet = rt2800pci_txstatus_tasklet, + .pretbtt_tasklet = rt2800pci_pretbtt_tasklet, + .tbtt_tasklet = rt2800pci_tbtt_tasklet, + .rxdone_tasklet = rt2800pci_rxdone_tasklet, + .autowake_tasklet = rt2800pci_autowake_tasklet, .probe_hw = rt2800pci_probe_hw, .get_firmware_name = rt2800pci_get_firmware_name, .check_firmware = rt2800_check_firmware, @@ -996,6 +1049,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .write_tx_desc = rt2800pci_write_tx_desc, .write_tx_data = rt2800_write_tx_data, .write_beacon = rt2800_write_beacon, + .clear_beacon = rt2800_clear_beacon, .fill_rxdone = rt2800pci_fill_rxdone, .config_shared_key = rt2800_config_shared_key, .config_pairwise_key = rt2800_config_pairwise_key, @@ -1079,6 +1133,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) }, #endif +#ifdef CONFIG_RT2800PCI_RT53XX + { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) }, +#endif { 0, } }; #endif /* CONFIG_PCI */ diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 197a36c05fda..f1a92144996f 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -253,9 +253,7 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev, rt2800usb_set_state(rt2x00dev, STATE_SLEEP); break; case STATE_RADIO_IRQ_ON: - case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: - case STATE_RADIO_IRQ_OFF_ISR: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: @@ -567,6 +565,7 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); + __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags); /* * Set the rssi offset. @@ -639,6 +638,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .write_tx_desc = rt2800usb_write_tx_desc, .write_tx_data = rt2800usb_write_tx_data, .write_beacon = rt2800_write_beacon, + .clear_beacon = rt2800_clear_beacon, .get_tx_data_len = rt2800usb_get_tx_data_len, .fill_rxdone = rt2800usb_fill_rxdone, .config_shared_key = rt2800_config_shared_key, diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 84aaf393da43..a3940d7300a4 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -189,6 +189,7 @@ struct rt2x00_chip { #define RT3572 0x3572 #define RT3593 0x3593 /* PCIe */ #define RT3883 0x3883 /* WSOC */ +#define RT5390 0x5390 /* 2.4GHz */ u16 rf; u16 rev; @@ -225,6 +226,8 @@ struct channel_info { struct antenna_setup { enum antenna rx; enum antenna tx; + u8 rx_chain_num; + u8 tx_chain_num; }; /* @@ -368,6 +371,7 @@ struct rt2x00_intf { * dedicated beacon entry. */ struct queue_entry *beacon; + bool enable_beacon; /* * Actions that needed rescheduling. @@ -463,7 +467,6 @@ struct rt2x00lib_crypto { const u8 *address; u32 bssidx; - u32 aid; u8 key[16]; u8 tx_mic[8]; @@ -511,14 +514,13 @@ struct rt2x00lib_ops { irq_handler_t irq_handler; /* - * Threaded Interrupt handlers. 
- */ - irq_handler_t irq_handler_thread; - - /* * TX status tasklet handler. */ void (*txstatus_tasklet) (unsigned long data); + void (*pretbtt_tasklet) (unsigned long data); + void (*tbtt_tasklet) (unsigned long data); + void (*rxdone_tasklet) (unsigned long data); + void (*autowake_tasklet) (unsigned long data); /* * Device init handlers. @@ -573,6 +575,7 @@ struct rt2x00lib_ops { struct txentry_desc *txdesc); void (*write_beacon) (struct queue_entry *entry, struct txentry_desc *txdesc); + void (*clear_beacon) (struct queue_entry *entry); int (*get_tx_data_len) (struct queue_entry *entry); /* @@ -658,12 +661,15 @@ enum rt2x00_flags { DRIVER_REQUIRE_L2PAD, DRIVER_REQUIRE_TXSTATUS_FIFO, DRIVER_REQUIRE_TASKLET_CONTEXT, + DRIVER_REQUIRE_SW_SEQNO, + DRIVER_REQUIRE_HT_TX_DESC, /* * Driver features */ CONFIG_SUPPORT_HW_BUTTON, CONFIG_SUPPORT_HW_CRYPTO, + CONFIG_SUPPORT_POWER_LIMIT, DRIVER_SUPPORT_CONTROL_FILTERS, DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, DRIVER_SUPPORT_PRE_TBTT_INTERRUPT, @@ -788,10 +794,12 @@ struct rt2x00_dev { * - Open ap interface count. * - Open sta interface count. * - Association count. + * - Beaconing enabled count. */ unsigned int intf_ap_count; unsigned int intf_sta_count; unsigned int intf_associated; + unsigned int intf_beaconing; /* * Link quality @@ -857,6 +865,13 @@ struct rt2x00_dev { */ struct ieee80211_low_level_stats low_level_stats; + /** + * Work queue for all work which should not be placed + * on the mac80211 workqueue (because of dependencies + * between various work structures). + */ + struct workqueue_struct *workqueue; + /* * Scheduled work. * NOTE: intf_work will use ieee80211_iterate_active_interfaces() @@ -872,14 +887,13 @@ struct rt2x00_dev { struct work_struct txdone_work; /* - * Data queue arrays for RX, TX and Beacon. - * The Beacon array also contains the Atim queue - * if that is supported by the device. + * Data queue arrays for RX, TX, Beacon and ATIM. */ unsigned int data_queues; struct data_queue *rx; struct data_queue *tx; struct data_queue *bcn; + struct data_queue *atim; /* * Firmware image. @@ -887,12 +901,6 @@ struct rt2x00_dev { const struct firmware *fw; /* - * Interrupt values, stored between interrupt service routine - * and interrupt thread routine. - */ - u32 irqvalue[2]; - - /* * FIFO for storing tx status reports between isr and tasklet. */ DECLARE_KFIFO_PTR(txstatus_fifo, u32); @@ -901,6 +909,15 @@ struct rt2x00_dev { * Tasklet for processing tx status reports (rt2800pci). */ struct tasklet_struct txstatus_tasklet; + struct tasklet_struct pretbtt_tasklet; + struct tasklet_struct tbtt_tasklet; + struct tasklet_struct rxdone_tasklet; + struct tasklet_struct autowake_tasklet; + + /* + * Protect the interrupt mask register. + */ + spinlock_t irqmask_lock; }; /* @@ -1046,12 +1063,24 @@ void rt2x00queue_map_txskb(struct queue_entry *entry); void rt2x00queue_unmap_skb(struct queue_entry *entry); /** - * rt2x00queue_get_queue - Convert queue index to queue pointer + * rt2x00queue_get_tx_queue - Convert tx queue index to queue pointer * @rt2x00dev: Pointer to &struct rt2x00_dev. * @queue: rt2x00 queue index (see &enum data_queue_qid). + * + * Returns NULL for non tx queues. 
*/ -struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, - const enum data_queue_qid queue); +static inline struct data_queue * +rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev, + const enum data_queue_qid queue) +{ + if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) + return &rt2x00dev->tx[queue]; + + if (queue == QID_ATIM) + return rt2x00dev->atim; + + return NULL; +} /** * rt2x00queue_get_entry - Get queue entry where the given index points to. @@ -1168,7 +1197,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry); /* * mac80211 handlers. */ -int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); +void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb); int rt2x00mac_start(struct ieee80211_hw *hw); void rt2x00mac_stop(struct ieee80211_hw *hw); int rt2x00mac_add_interface(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index 9597a03242cc..9de9dbe94399 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -121,7 +121,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac, return; if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags)) - rt2x00queue_update_beacon(rt2x00dev, vif, true); + rt2x00queue_update_beacon(rt2x00dev, vif); } static void rt2x00lib_intf_scheduled(struct work_struct *work) @@ -174,7 +174,13 @@ static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac, vif->type != NL80211_IFTYPE_WDS) return; - rt2x00queue_update_beacon(rt2x00dev, vif, true); + /* + * Update the beacon without locking. This is safe on PCI devices + * as they only update the beacon periodically here. This should + * never be called for USB devices. + */ + WARN_ON(rt2x00_is_usb(rt2x00dev)); + rt2x00queue_update_beacon_locked(rt2x00dev, vif); } void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) @@ -183,9 +189,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) return; /* send buffered bc/mc frames out for every bssid */ - ieee80211_iterate_active_interfaces(rt2x00dev->hw, - rt2x00lib_bc_buffer_iter, - rt2x00dev); + ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, + rt2x00lib_bc_buffer_iter, + rt2x00dev); /* * Devices with pre tbtt interrupt don't need to update the beacon * here as they will fetch the next beacon directly prior to @@ -195,9 +201,9 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) return; /* fetch next beacon */ - ieee80211_iterate_active_interfaces(rt2x00dev->hw, - rt2x00lib_beaconupdate_iter, - rt2x00dev); + ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, + rt2x00lib_beaconupdate_iter, + rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); @@ -207,9 +213,9 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev) return; /* fetch next beacon */ - ieee80211_iterate_active_interfaces(rt2x00dev->hw, - rt2x00lib_beaconupdate_iter, - rt2x00dev); + ieee80211_iterate_active_interfaces_atomic(rt2x00dev->hw, + rt2x00lib_beaconupdate_iter, + rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); @@ -649,7 +655,10 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry, const int channel, const int tx_power, const int value) { - entry->center_freq = ieee80211_channel_to_frequency(channel); + /* XXX: this assumption about the band is wrong for 802.11j */ + entry->band = channel <= 14 ? 
IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + entry->center_freq = ieee80211_channel_to_frequency(channel, + entry->band); entry->hw_value = value; entry->max_power = tx_power; entry->max_antenna_gain = 0xff; @@ -812,15 +821,29 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) GFP_KERNEL); if (status) return status; + } - /* tasklet for processing the tx status reports. */ - if (rt2x00dev->ops->lib->txstatus_tasklet) - tasklet_init(&rt2x00dev->txstatus_tasklet, - rt2x00dev->ops->lib->txstatus_tasklet, - (unsigned long)rt2x00dev); - + /* + * Initialize tasklets if used by the driver. Tasklets are + * disabled until the interrupts are turned on. The driver + * has to handle that. + */ +#define RT2X00_TASKLET_INIT(taskletname) \ + if (rt2x00dev->ops->lib->taskletname) { \ + tasklet_init(&rt2x00dev->taskletname, \ + rt2x00dev->ops->lib->taskletname, \ + (unsigned long)rt2x00dev); \ + tasklet_disable(&rt2x00dev->taskletname); \ } + RT2X00_TASKLET_INIT(txstatus_tasklet); + RT2X00_TASKLET_INIT(pretbtt_tasklet); + RT2X00_TASKLET_INIT(tbtt_tasklet); + RT2X00_TASKLET_INIT(rxdone_tasklet); + RT2X00_TASKLET_INIT(autowake_tasklet); + +#undef RT2X00_TASKLET_INIT + /* * Register HW. */ @@ -949,6 +972,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) { int retval = -ENOMEM; + spin_lock_init(&rt2x00dev->irqmask_lock); mutex_init(&rt2x00dev->csr_mutex); set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); @@ -973,8 +997,15 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) BIT(NL80211_IFTYPE_WDS); /* - * Initialize configuration work. + * Initialize work. */ + rt2x00dev->workqueue = + alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0); + if (!rt2x00dev->workqueue) { + retval = -ENOMEM; + goto exit; + } + INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); /* @@ -1033,6 +1064,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) cancel_work_sync(&rt2x00dev->intf_work); cancel_work_sync(&rt2x00dev->rxdone_work); cancel_work_sync(&rt2x00dev->txdone_work); + destroy_workqueue(rt2x00dev->workqueue); /* * Free the tx status fifo. @@ -1043,6 +1075,10 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) * Kill the tx status tasklet. */ tasklet_kill(&rt2x00dev->txstatus_tasklet); + tasklet_kill(&rt2x00dev->pretbtt_tasklet); + tasklet_kill(&rt2x00dev->tbtt_tasklet); + tasklet_kill(&rt2x00dev->rxdone_tasklet); + tasklet_kill(&rt2x00dev->autowake_tasklet); /* * Uninitialize device. diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c index b7ad46ecaa1d..ae1219dffaae 100644 --- a/drivers/net/wireless/rt2x00/rt2x00ht.c +++ b/drivers/net/wireless/rt2x00/rt2x00ht.c @@ -38,12 +38,12 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; if (tx_info->control.sta) - txdesc->mpdu_density = + txdesc->u.ht.mpdu_density = tx_info->control.sta->ht_cap.ampdu_density; - txdesc->ba_size = 7; /* FIXME: What value is needed? */ + txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ - txdesc->stbc = + txdesc->u.ht.stbc = (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT; /* @@ -51,25 +51,24 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, * mcs rate to be used */ if (txrate->flags & IEEE80211_TX_RC_MCS) { - txdesc->mcs = txrate->idx; + txdesc->u.ht.mcs = txrate->idx; /* * MIMO PS should be set to 1 for STA's using dynamic SM PS * when using more then one tx stream (>MCS7). 
*/ - if (tx_info->control.sta && txdesc->mcs > 7 && + if (tx_info->control.sta && txdesc->u.ht.mcs > 7 && ((tx_info->control.sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> IEEE80211_HT_CAP_SM_PS_SHIFT) == WLAN_HT_CAP_SM_PS_DYNAMIC) __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); } else { - txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); + txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs); if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) - txdesc->mcs |= 0x08; + txdesc->u.ht.mcs |= 0x08; } - /* * This frame is eligible for an AMPDU, however, don't aggregate * frames that are intended to probe a specific tx rate. @@ -79,14 +78,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); /* - * Determine HT Mix/Greenfield rate mode - */ - if (txrate->flags & IEEE80211_TX_RC_MCS) - txdesc->rate_mode = RATE_MODE_HT_MIX; - if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) - txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; - - /* * Set 40Mhz mode if necessary (for legacy rates this will * duplicate the frame to both channels). */ @@ -106,11 +97,11 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry, * for frames not transmitted with TXOP_HTTXOP */ if (ieee80211_is_mgmt(hdr->frame_control)) - txdesc->txop = TXOP_BACKOFF; + txdesc->u.ht.txop = TXOP_BACKOFF; else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) - txdesc->txop = TXOP_SIFS; + txdesc->u.ht.txop = TXOP_SIFS; else - txdesc->txop = TXOP_HTTXOP; + txdesc->u.ht.txop = TXOP_HTTXOP; } u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev, diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h index a105c500627b..2d94cbaf5f4a 100644 --- a/drivers/net/wireless/rt2x00/rt2x00lib.h +++ b/drivers/net/wireless/rt2x00/rt2x00lib.h @@ -157,14 +157,30 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, bool local); /** - * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware + * rt2x00queue_update_beacon - Send new beacon from mac80211 + * to hardware. Handles locking by itself (mutex). * @rt2x00dev: Pointer to &struct rt2x00_dev. * @vif: Interface for which the beacon should be updated. - * @enable_beacon: Enable beaconing */ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, - struct ieee80211_vif *vif, - const bool enable_beacon); + struct ieee80211_vif *vif); + +/** + * rt2x00queue_update_beacon_locked - Send new beacon from mac80211 + * to hardware. Caller needs to ensure locking. + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @vif: Interface for which the beacon should be updated. + */ +int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev, + struct ieee80211_vif *vif); + +/** + * rt2x00queue_clear_beacon - Clear beacon in hardware + * @rt2x00dev: Pointer to &struct rt2x00_dev. + * @vif: Interface for which the beacon should be updated. 
+ */ +int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev, + struct ieee80211_vif *vif); /** * rt2x00queue_index_inc - Index incrementation function diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c index bfda60eaf4ef..c975b0a12e95 100644 --- a/drivers/net/wireless/rt2x00/rt2x00link.c +++ b/drivers/net/wireless/rt2x00/rt2x00link.c @@ -417,7 +417,8 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev) !test_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags)) return; - schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL); + ieee80211_queue_delayed_work(rt2x00dev->hw, + &link->watchdog_work, WATCHDOG_INTERVAL); } void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev) @@ -441,7 +442,9 @@ static void rt2x00link_watchdog(struct work_struct *work) rt2x00dev->ops->lib->watchdog(rt2x00dev); if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) - schedule_delayed_work(&link->watchdog_work, WATCHDOG_INTERVAL); + ieee80211_queue_delayed_work(rt2x00dev->hw, + &link->watchdog_work, + WATCHDOG_INTERVAL); } void rt2x00link_register(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index f3da051df39e..661c6baad2b9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -99,7 +99,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, return retval; } -int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rt2x00_dev *rt2x00dev = hw->priv; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); @@ -116,13 +116,13 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) goto exit_fail; /* - * Determine which queue to put packet on. + * Use the ATIM queue if appropriate and present. */ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) - queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM); - else - queue = rt2x00queue_get_queue(rt2x00dev, qid); + qid = QID_ATIM; + + queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); if (unlikely(!queue)) { ERROR(rt2x00dev, "Attempt to send packet over invalid queue %d.\n" @@ -139,9 +139,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) * either RTS or CTS-to-self frame and handles everything * inside the hardware. 
*/ - if ((tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | - IEEE80211_TX_RC_USE_CTS_PROTECT)) && - !rt2x00dev->ops->hw->set_rts_threshold) { + if (!rt2x00dev->ops->hw->set_rts_threshold && + (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | + IEEE80211_TX_RC_USE_CTS_PROTECT))) { if (rt2x00queue_available(queue) <= 1) goto exit_fail; @@ -149,18 +149,17 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) goto exit_fail; } - if (rt2x00queue_write_tx_frame(queue, skb, false)) + if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false))) goto exit_fail; if (rt2x00queue_threshold(queue)) rt2x00queue_pause_queue(queue); - return NETDEV_TX_OK; + return; exit_fail: ieee80211_stop_queue(rt2x00dev->hw, qid); dev_kfree_skb_any(skb); - return NETDEV_TX_OK; } EXPORT_SYMBOL_GPL(rt2x00mac_tx); @@ -191,7 +190,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw, { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_intf *intf = vif_to_intf(vif); - struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); + struct data_queue *queue = rt2x00dev->bcn; struct queue_entry *entry = NULL; unsigned int i; @@ -519,11 +518,9 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, crypto.cmd = cmd; - if (sta) { - /* some drivers need the AID */ - crypto.aid = sta->aid; + if (sta) crypto.address = sta->addr; - } else + else crypto.address = bcast_addr; if (crypto.cipher == CIPHER_TKIP) @@ -617,11 +614,47 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, bss_conf->bssid); /* - * Update the beacon. + * Update the beacon. This is only required on USB devices. PCI + * devices fetch beacons periodically. */ - if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) - rt2x00queue_update_beacon(rt2x00dev, vif, - bss_conf->enable_beacon); + if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev)) + rt2x00queue_update_beacon(rt2x00dev, vif); + + /* + * Start/stop beaconing. + */ + if (changes & BSS_CHANGED_BEACON_ENABLED) { + if (!bss_conf->enable_beacon && intf->enable_beacon) { + rt2x00queue_clear_beacon(rt2x00dev, vif); + rt2x00dev->intf_beaconing--; + intf->enable_beacon = false; + + if (rt2x00dev->intf_beaconing == 0) { + /* + * Last beaconing interface disabled + * -> stop beacon queue. + */ + mutex_lock(&intf->beacon_skb_mutex); + rt2x00queue_stop_queue(rt2x00dev->bcn); + mutex_unlock(&intf->beacon_skb_mutex); + } + + + } else if (bss_conf->enable_beacon && !intf->enable_beacon) { + rt2x00dev->intf_beaconing++; + intf->enable_beacon = true; + + if (rt2x00dev->intf_beaconing == 1) { + /* + * First beaconing interface enabled + * -> start beacon queue. + */ + mutex_lock(&intf->beacon_skb_mutex); + rt2x00queue_start_queue(rt2x00dev->bcn); + mutex_unlock(&intf->beacon_skb_mutex); + } + } + } /* * When the association status has changed we must reset the link @@ -657,7 +690,7 @@ int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; - queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); if (unlikely(!queue)) return -EINVAL; diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index ace0b668c04e..4dd82b0b0520 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -160,10 +160,9 @@ int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) /* * Register interrupt handler. 
*/ - status = request_threaded_irq(rt2x00dev->irq, - rt2x00dev->ops->lib->irq_handler, - rt2x00dev->ops->lib->irq_handler_thread, - IRQF_SHARED, rt2x00dev->name, rt2x00dev); + status = request_irq(rt2x00dev->irq, + rt2x00dev->ops->lib->irq_handler, + IRQF_SHARED, rt2x00dev->name, rt2x00dev); if (status) { ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", rt2x00dev->irq, status); diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index ca82b3a91697..4b3c70eeef1f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -221,14 +221,17 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry, struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); unsigned long irqflags; - if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) || - unlikely(!tx_info->control.vif)) + if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) + return; + + __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); + + if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags)) return; /* - * Hardware should insert sequence counter. - * FIXME: We insert a software sequence counter first for - * hardware that doesn't support hardware sequence counting. + * The hardware is not able to insert a sequence number. Assign a + * software generated one here. * * This is wrong because beacons are not getting sequence * numbers assigned properly. @@ -246,7 +249,6 @@ static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry, spin_unlock_irqrestore(&intf->seqlock, irqflags); - __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); } static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, @@ -260,6 +262,16 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, unsigned int duration; unsigned int residual; + /* + * Determine with what IFS priority this frame should be send. + * Set ifs to IFS_SIFS when the this is not the first fragment, + * or this fragment came after RTS/CTS. + */ + if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) + txdesc->u.plcp.ifs = IFS_BACKOFF; + else + txdesc->u.plcp.ifs = IFS_SIFS; + /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ data_length = entry->skb->len + 4; data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb); @@ -268,12 +280,12 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, * PLCP setup * Length calculation depends on OFDM/CCK rate. */ - txdesc->signal = hwrate->plcp; - txdesc->service = 0x04; + txdesc->u.plcp.signal = hwrate->plcp; + txdesc->u.plcp.service = 0x04; if (hwrate->flags & DEV_RATE_OFDM) { - txdesc->length_high = (data_length >> 6) & 0x3f; - txdesc->length_low = data_length & 0x3f; + txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f; + txdesc->u.plcp.length_low = data_length & 0x3f; } else { /* * Convert length to microseconds. @@ -288,18 +300,18 @@ static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, * Check if we need to set the Length Extension */ if (hwrate->bitrate == 110 && residual <= 30) - txdesc->service |= 0x80; + txdesc->u.plcp.service |= 0x80; } - txdesc->length_high = (duration >> 8) & 0xff; - txdesc->length_low = duration & 0xff; + txdesc->u.plcp.length_high = (duration >> 8) & 0xff; + txdesc->u.plcp.length_low = duration & 0xff; /* * When preamble is enabled we should set the * preamble bit for the signal. 
*/ if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) - txdesc->signal |= 0x08; + txdesc->u.plcp.signal |= 0x08; } } @@ -309,9 +321,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; - struct ieee80211_rate *rate = - ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); - const struct rt2x00_rate *hwrate; + struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; + struct ieee80211_rate *rate; + const struct rt2x00_rate *hwrate = NULL; memset(txdesc, 0, sizeof(*txdesc)); @@ -365,42 +377,42 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry, /* * Beacons and probe responses require the tsf timestamp - * to be inserted into the frame, except for a frame that has been injected - * through a monitor interface. This latter is needed for testing a - * monitor interface. + * to be inserted into the frame. */ - if ((ieee80211_is_beacon(hdr->frame_control) || - ieee80211_is_probe_resp(hdr->frame_control)) && - (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED))) + if (ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); - /* - * Determine with what IFS priority this frame should be send. - * Set ifs to IFS_SIFS when the this is not the first fragment, - * or this fragment came after RTS/CTS. - */ if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && - !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) { + !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); - txdesc->ifs = IFS_BACKOFF; - } else - txdesc->ifs = IFS_SIFS; /* * Determine rate modulation. 
*/ - hwrate = rt2x00_get_rate(rate->hw_value); - txdesc->rate_mode = RATE_MODE_CCK; - if (hwrate->flags & DEV_RATE_OFDM) - txdesc->rate_mode = RATE_MODE_OFDM; + if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) + txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; + else if (txrate->flags & IEEE80211_TX_RC_MCS) + txdesc->rate_mode = RATE_MODE_HT_MIX; + else { + rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); + hwrate = rt2x00_get_rate(rate->hw_value); + if (hwrate->flags & DEV_RATE_OFDM) + txdesc->rate_mode = RATE_MODE_OFDM; + else + txdesc->rate_mode = RATE_MODE_CCK; + } /* * Apply TX descriptor handling by components */ rt2x00crypto_create_tx_descriptor(entry, txdesc); - rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate); rt2x00queue_create_tx_descriptor_seq(entry, txdesc); - rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); + + if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags)) + rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate); + else + rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); } static int rt2x00queue_write_tx_data(struct queue_entry *entry, @@ -566,13 +578,10 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, return 0; } -int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, - struct ieee80211_vif *vif, - const bool enable_beacon) +int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev, + struct ieee80211_vif *vif) { struct rt2x00_intf *intf = vif_to_intf(vif); - struct skb_frame_desc *skbdesc; - struct txentry_desc txdesc; if (unlikely(!intf->beacon)) return -ENOBUFS; @@ -584,17 +593,36 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, */ rt2x00queue_free_skb(intf->beacon); - if (!enable_beacon) { - rt2x00queue_stop_queue(intf->beacon->queue); - mutex_unlock(&intf->beacon_skb_mutex); - return 0; - } + /* + * Clear beacon (single bssid devices don't need to clear the beacon + * since the beacon queue will get stopped anyway). + */ + if (rt2x00dev->ops->lib->clear_beacon) + rt2x00dev->ops->lib->clear_beacon(intf->beacon); + + mutex_unlock(&intf->beacon_skb_mutex); + + return 0; +} + +int rt2x00queue_update_beacon_locked(struct rt2x00_dev *rt2x00dev, + struct ieee80211_vif *vif) +{ + struct rt2x00_intf *intf = vif_to_intf(vif); + struct skb_frame_desc *skbdesc; + struct txentry_desc txdesc; + + if (unlikely(!intf->beacon)) + return -ENOBUFS; + + /* + * Clean up the beacon skb. + */ + rt2x00queue_free_skb(intf->beacon); intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif); - if (!intf->beacon->skb) { - mutex_unlock(&intf->beacon_skb_mutex); + if (!intf->beacon->skb) return -ENOMEM; - } /* * Copy all TX descriptor information into txdesc, @@ -611,13 +639,25 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, skbdesc->entry = intf->beacon; /* - * Send beacon to hardware and enable beacon genaration.. + * Send beacon to hardware. 
*/ rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); + return 0; + +} + +int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, + struct ieee80211_vif *vif) +{ + struct rt2x00_intf *intf = vif_to_intf(vif); + int ret; + + mutex_lock(&intf->beacon_skb_mutex); + ret = rt2x00queue_update_beacon_locked(rt2x00dev, vif); mutex_unlock(&intf->beacon_skb_mutex); - return 0; + return ret; } void rt2x00queue_for_each_entry(struct data_queue *queue, @@ -665,29 +705,6 @@ void rt2x00queue_for_each_entry(struct data_queue *queue, } EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); -struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, - const enum data_queue_qid queue) -{ - int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); - - if (queue == QID_RX) - return rt2x00dev->rx; - - if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) - return &rt2x00dev->tx[queue]; - - if (!rt2x00dev->bcn) - return NULL; - - if (queue == QID_BEACON) - return &rt2x00dev->bcn[0]; - else if (queue == QID_ATIM && atim) - return &rt2x00dev->bcn[1]; - - return NULL; -} -EXPORT_SYMBOL_GPL(rt2x00queue_get_queue); - struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, enum queue_index index) { @@ -885,7 +902,7 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) * The queue flush has failed... */ if (unlikely(!rt2x00queue_empty(queue))) - WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid); + WARNING(queue->rt2x00dev, "Queue %d failed to flush\n", queue->qid); /* * Restore the queue to the previous status @@ -1063,7 +1080,7 @@ int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev) goto exit; if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) { - status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1], + status = rt2x00queue_alloc_entries(rt2x00dev->atim, rt2x00dev->ops->atim); if (status) goto exit; @@ -1137,6 +1154,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) rt2x00dev->rx = queue; rt2x00dev->tx = &queue[1]; rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; + rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL; /* * Initialize queue parameters. 
@@ -1153,9 +1171,9 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) tx_queue_for_each(rt2x00dev, queue) rt2x00queue_init(rt2x00dev, queue, qid++); - rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON); + rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON); if (req_atim) - rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM); + rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM); return 0; } diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h index fab8e2687f29..0c8b0c699679 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.h +++ b/drivers/net/wireless/rt2x00/rt2x00queue.h @@ -305,20 +305,27 @@ struct txentry_desc { u16 length; u16 header_length; - u16 length_high; - u16 length_low; - u16 signal; - u16 service; - - u16 mcs; - u16 stbc; - u16 ba_size; - u16 rate_mode; - u16 mpdu_density; + union { + struct { + u16 length_high; + u16 length_low; + u16 signal; + u16 service; + enum ifs ifs; + } plcp; + + struct { + u16 mcs; + u8 stbc; + u8 ba_size; + u8 mpdu_density; + enum txop txop; + } ht; + } u; + + enum rate_modulation rate_mode; short retry_limit; - short ifs; - short txop; enum cipher cipher; u16 key_idx; diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h index e8259ae48ced..6f867eec49cc 100644 --- a/drivers/net/wireless/rt2x00/rt2x00reg.h +++ b/drivers/net/wireless/rt2x00/rt2x00reg.h @@ -85,8 +85,6 @@ enum dev_state { STATE_RADIO_OFF, STATE_RADIO_IRQ_ON, STATE_RADIO_IRQ_OFF, - STATE_RADIO_IRQ_ON_ISR, - STATE_RADIO_IRQ_OFF_ISR, }; /* diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c index 1a9937d5aff6..fbe735f5b352 100644 --- a/drivers/net/wireless/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/rt2x00/rt2x00usb.c @@ -227,7 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb) * Schedule the delayed work for reading the TX status * from the device. */ - ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work); + queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); } static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) @@ -320,7 +320,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb) * Schedule the delayed work for reading the RX status * from the device. */ - ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work); + queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work); } static void rt2x00usb_kick_rx_entry(struct queue_entry *entry) @@ -429,7 +429,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue) * Schedule the completion handler manually, when this * worker function runs, it should cleanup the queue. 
*/ - ieee80211_queue_work(queue->rt2x00dev->hw, completion); + queue_work(queue->rt2x00dev->workqueue, completion); /* * Wait for a little while to give the driver @@ -453,7 +453,7 @@ static void rt2x00usb_watchdog_tx_status(struct data_queue *queue) WARNING(queue->rt2x00dev, "TX queue %d status timed out," " invoke forced tx handler\n", queue->qid); - ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work); + queue_work(queue->rt2x00dev->workqueue, &queue->rt2x00dev->txdone_work); } void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 8de44dd401e0..77e8113b91e1 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -551,26 +551,14 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00intf_conf *conf, const unsigned int flags) { - unsigned int beacon_base; u32 reg; if (flags & CONFIG_UPDATE_TYPE) { /* - * Clear current synchronisation setup. - * For the Beacon base registers, we only need to clear - * the first byte since that byte contains the VALID and OWNER - * bits which (when set to 0) will invalidate the entire beacon. - */ - beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); - rt2x00pci_register_write(rt2x00dev, beacon_base, 0); - - /* * Enable synchronisation. */ rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); - rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, conf->sync); - rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); } @@ -1154,6 +1142,11 @@ static void rt61pci_start_queue(struct data_queue *queue) rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); break; case QID_BEACON: + /* + * Allow the tbtt tasklet to be scheduled. + */ + tasklet_enable(&rt2x00dev->tbtt_tasklet); + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); @@ -1233,6 +1226,11 @@ static void rt61pci_stop_queue(struct data_queue *queue) rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 0); rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + + /* + * Wait for possibly running tbtt tasklets. + */ + tasklet_disable(&rt2x00dev->tbtt_tasklet); break; default: break; @@ -1719,9 +1717,9 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev) static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { - int mask = (state == STATE_RADIO_IRQ_OFF) || - (state == STATE_RADIO_IRQ_OFF_ISR); + int mask = (state == STATE_RADIO_IRQ_OFF); u32 reg; + unsigned long flags; /* * When interrupts are being enabled, the interrupt registers @@ -1733,12 +1731,21 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, rt2x00pci_register_read(rt2x00dev, MCU_INT_SOURCE_CSR, ®); rt2x00pci_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg); + + /* + * Enable tasklets. + */ + tasklet_enable(&rt2x00dev->txstatus_tasklet); + tasklet_enable(&rt2x00dev->rxdone_tasklet); + tasklet_enable(&rt2x00dev->autowake_tasklet); } /* * Only toggle the interrupts bits we are going to use. * Non-checked interrupt bits are disabled by default. 
*/ + spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); + rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, ®); rt2x00_set_field32(®, INT_MASK_CSR_TXDONE, mask); rt2x00_set_field32(®, INT_MASK_CSR_RXDONE, mask); @@ -1758,6 +1765,17 @@ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(®, MCU_INT_MASK_CSR_7, mask); rt2x00_set_field32(®, MCU_INT_MASK_CSR_TWAKEUP, mask); rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); + + spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); + + if (state == STATE_RADIO_IRQ_OFF) { + /* + * Ensure that all tasklets are finished. + */ + tasklet_disable(&rt2x00dev->txstatus_tasklet); + tasklet_disable(&rt2x00dev->rxdone_tasklet); + tasklet_disable(&rt2x00dev->autowake_tasklet); + } } static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev) @@ -1833,9 +1851,7 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev, rt61pci_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: - case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: - case STATE_RADIO_IRQ_OFF_ISR: rt61pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: @@ -1882,10 +1898,12 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry, rt2x00_desc_write(txd, 1, word); rt2x00_desc_read(txd, 2, &word); - rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); - rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, + txdesc->u.plcp.length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, + txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { @@ -1930,7 +1948,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, @@ -1962,13 +1980,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry, struct queue_entry_priv_pci *entry_priv = entry->priv_data; unsigned int beacon_base; unsigned int padding_len; - u32 reg; + u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + orig_reg = reg; rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); @@ -1986,7 +2005,14 @@ static void rt61pci_write_beacon(struct queue_entry *entry, * Write entire beacon with descriptor and padding to register. 
*/ padding_len = roundup(entry->skb->len, 4) - entry->skb->len; - skb_pad(entry->skb, padding_len); + if (padding_len && skb_pad(entry->skb, padding_len)) { + ERROR(rt2x00dev, "Failure padding beacon, aborting\n"); + /* skb freed by skb_pad() on failure */ + entry->skb = NULL; + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, orig_reg); + return; + } + beacon_base = HW_BEACON_OFFSET(entry->entry_idx); rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, entry_priv->desc, TXINFO_SIZE); @@ -2002,8 +2028,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry, */ rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); - rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); - rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); @@ -2014,6 +2038,32 @@ static void rt61pci_write_beacon(struct queue_entry *entry, entry->skb = NULL; } +static void rt61pci_clear_beacon(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + u32 reg; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); + + /* + * Clear beacon. + */ + rt2x00pci_register_write(rt2x00dev, + HW_BEACON_OFFSET(entry->entry_idx), 0); + + /* + * Enable beaconing again. + */ + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); + rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); +} + /* * RX control handlers */ @@ -2078,9 +2128,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry, rxdesc->flags |= RX_FLAG_IV_STRIPPED; /* - * FIXME: Legacy driver indicates that the frame does - * contain the Michael Mic. Unfortunately, in rt2x00 - * the MIC seems to be missing completely... + * The hardware has already checked the Michael Mic and has + * stripped it from the frame. Signal this to mac80211. */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; @@ -2143,7 +2192,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) * queue identication number. */ type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE); - queue = rt2x00queue_get_queue(rt2x00dev, type); + queue = rt2x00queue_get_tx_queue(rt2x00dev, type); if (unlikely(!queue)) continue; @@ -2211,61 +2260,77 @@ static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } -static irqreturn_t rt61pci_interrupt_thread(int irq, void *dev_instance) +static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, + struct rt2x00_field32 irq_field) { - struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg = rt2x00dev->irqvalue[0]; - u32 reg_mcu = rt2x00dev->irqvalue[1]; + u32 reg; /* - * Handle interrupts, walk through all bits - * and run the tasks, the bits are checked in order of - * priority. + * Enable a single interrupt. The interrupt mask register + * access needs locking. */ + spin_lock_irq(&rt2x00dev->irqmask_lock); - /* - * 1 - Rx ring done interrupt. - */ - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE)) - rt2x00pci_rxdone(rt2x00dev); + rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, ®); + rt2x00_set_field32(®, irq_field, 0); + rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); - /* - * 2 - Tx ring done interrupt. 
- */ - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE)) - rt61pci_txdone(rt2x00dev); + spin_unlock_irq(&rt2x00dev->irqmask_lock); +} - /* - * 3 - Handle MCU command done. - */ - if (reg_mcu) - rt2x00pci_register_write(rt2x00dev, - M2H_CMD_DONE_CSR, 0xffffffff); +static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev, + struct rt2x00_field32 irq_field) +{ + u32 reg; /* - * 4 - MCU Autowakeup interrupt. + * Enable a single MCU interrupt. The interrupt mask register + * access needs locking. */ - if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP)) - rt61pci_wakeup(rt2x00dev); + spin_lock_irq(&rt2x00dev->irqmask_lock); - /* - * 5 - Beacon done interrupt. - */ - if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE)) - rt2x00lib_beacondone(rt2x00dev); + rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, ®); + rt2x00_set_field32(®, irq_field, 0); + rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); - /* Enable interrupts again. */ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, - STATE_RADIO_IRQ_ON_ISR); - return IRQ_HANDLED; + spin_unlock_irq(&rt2x00dev->irqmask_lock); +} + +static void rt61pci_txstatus_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt61pci_txdone(rt2x00dev); + rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE); +} + +static void rt61pci_tbtt_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00lib_beacondone(rt2x00dev); + rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE); +} + +static void rt61pci_rxdone_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt2x00pci_rxdone(rt2x00dev); + rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE); } +static void rt61pci_autowake_tasklet(unsigned long data) +{ + struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; + rt61pci_wakeup(rt2x00dev); + rt2x00pci_register_write(rt2x00dev, + M2H_CMD_DONE_CSR, 0xffffffff); + rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP); +} static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; - u32 reg_mcu; - u32 reg; + u32 reg_mcu, mask_mcu; + u32 reg, mask; /* * Get the interrupt sources & saved to local variable. @@ -2283,14 +2348,46 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; - /* Store irqvalues for use in the interrupt thread. */ - rt2x00dev->irqvalue[0] = reg; - rt2x00dev->irqvalue[1] = reg_mcu; + /* + * Schedule tasklets for interrupt handling. + */ + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE)) + tasklet_schedule(&rt2x00dev->rxdone_tasklet); + + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE)) + tasklet_schedule(&rt2x00dev->txstatus_tasklet); + + if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE)) + tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); + + if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP)) + tasklet_schedule(&rt2x00dev->autowake_tasklet); + + /* + * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits + * for interrupts and interrupt masks we can just use the value of + * INT_SOURCE_CSR to create the interrupt mask. + */ + mask = reg; + mask_mcu = reg_mcu; - /* Disable interrupts, will be enabled again in the interrupt thread. 
*/ - rt2x00dev->ops->lib->set_device_state(rt2x00dev, - STATE_RADIO_IRQ_OFF_ISR); - return IRQ_WAKE_THREAD; + /* + * Disable all interrupts for which a tasklet was scheduled right now, + * the tasklet will reenable the appropriate interrupts. + */ + spin_lock(&rt2x00dev->irqmask_lock); + + rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, ®); + reg |= mask; + rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); + + rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, ®); + reg |= mask_mcu; + rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); + + spin_unlock(&rt2x00dev->irqmask_lock); + + return IRQ_HANDLED; } /* @@ -2819,7 +2916,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, if (queue_idx >= 4) return 0; - queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); /* Update WMM TXOP register */ offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); @@ -2884,7 +2981,10 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = { static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { .irq_handler = rt61pci_interrupt, - .irq_handler_thread = rt61pci_interrupt_thread, + .txstatus_tasklet = rt61pci_txstatus_tasklet, + .tbtt_tasklet = rt61pci_tbtt_tasklet, + .rxdone_tasklet = rt61pci_rxdone_tasklet, + .autowake_tasklet = rt61pci_autowake_tasklet, .probe_hw = rt61pci_probe_hw, .get_firmware_name = rt61pci_get_firmware_name, .check_firmware = rt61pci_check_firmware, @@ -2903,6 +3003,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { .stop_queue = rt61pci_stop_queue, .write_tx_desc = rt61pci_write_tx_desc, .write_beacon = rt61pci_write_beacon, + .clear_beacon = rt61pci_clear_beacon, .fill_rxdone = rt61pci_fill_rxdone, .config_shared_key = rt61pci_config_shared_key, .config_pairwise_key = rt61pci_config_pairwise_key, diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 029be3c6c030..02f1148c577e 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -502,26 +502,14 @@ static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00intf_conf *conf, const unsigned int flags) { - unsigned int beacon_base; u32 reg; if (flags & CONFIG_UPDATE_TYPE) { /* - * Clear current synchronisation setup. - * For the Beacon base registers we only need to clear - * the first byte since that byte contains the VALID and OWNER - * bits which (when set to 0) will invalidate the entire beacon. - */ - beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx); - rt2x00usb_register_write(rt2x00dev, beacon_base, 0); - - /* * Enable synchronisation. 
*/ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, ®); - rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); rt2x00_set_field32(®, TXRX_CSR9_TSF_SYNC, conf->sync); - rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); } @@ -1440,9 +1428,7 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev, rt73usb_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: - case STATE_RADIO_IRQ_ON_ISR: case STATE_RADIO_IRQ_OFF: - case STATE_RADIO_IRQ_OFF_ISR: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: @@ -1488,7 +1474,7 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, @@ -1513,10 +1499,12 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry, rt2x00_desc_write(txd, 1, word); rt2x00_desc_read(txd, 2, &word); - rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); - rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, + txdesc->u.plcp.length_low); + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, + txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { @@ -1547,13 +1535,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry, struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; unsigned int beacon_base; unsigned int padding_len; - u32 reg; + u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, ®); + orig_reg = reg; rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); @@ -1577,7 +1566,14 @@ static void rt73usb_write_beacon(struct queue_entry *entry, * Write entire beacon with descriptor and padding to register. 
*/ padding_len = roundup(entry->skb->len, 4) - entry->skb->len; - skb_pad(entry->skb, padding_len); + if (padding_len && skb_pad(entry->skb, padding_len)) { + ERROR(rt2x00dev, "Failure padding beacon, aborting\n"); + /* skb freed by skb_pad() on failure */ + entry->skb = NULL; + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg); + return; + } + beacon_base = HW_BEACON_OFFSET(entry->entry_idx); rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, entry->skb->len + padding_len); @@ -1590,8 +1586,6 @@ static void rt73usb_write_beacon(struct queue_entry *entry, */ rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); - rt2x00_set_field32(®, TXRX_CSR9_TSF_TICKING, 1); - rt2x00_set_field32(®, TXRX_CSR9_TBTT_ENABLE, 1); rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); @@ -1602,6 +1596,33 @@ static void rt73usb_write_beacon(struct queue_entry *entry, entry->skb = NULL; } +static void rt73usb_clear_beacon(struct queue_entry *entry) +{ + struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; + unsigned int beacon_base; + u32 reg; + + /* + * Disable beaconing while we are reloading the beacon data, + * otherwise we might be sending out invalid data. + */ + rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, ®); + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 0); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); + + /* + * Clear beacon. + */ + beacon_base = HW_BEACON_OFFSET(entry->entry_idx); + rt2x00usb_register_write(rt2x00dev, beacon_base, 0); + + /* + * Enable beaconing again. + */ + rt2x00_set_field32(®, TXRX_CSR9_BEACON_GEN, 1); + rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); +} + static int rt73usb_get_tx_data_len(struct queue_entry *entry) { int length; @@ -1698,9 +1719,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry, rxdesc->flags |= RX_FLAG_IV_STRIPPED; /* - * FIXME: Legacy driver indicates that the frame does - * contain the Michael Mic. Unfortunately, in rt2x00 - * the MIC seems to be missing completely... + * The hardware has already checked the Michael Mic and has + * stripped it from the frame. Signal this to mac80211. 
*/ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; @@ -2229,7 +2249,7 @@ static int rt73usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx, if (queue_idx >= 4) return 0; - queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); + queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); /* Update WMM TXOP register */ offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); @@ -2313,6 +2333,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { .flush_queue = rt2x00usb_flush_queue, .write_tx_desc = rt73usb_write_tx_desc, .write_beacon = rt73usb_write_beacon, + .clear_beacon = rt73usb_clear_beacon, .get_tx_data_len = rt73usb_get_tx_data_len, .fill_rxdone = rt73usb_fill_rxdone, .config_shared_key = rt73usb_config_shared_key, diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c index 5851cbc1e957..80db5cabc9b9 100644 --- a/drivers/net/wireless/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c @@ -146,7 +146,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev) rx_status.freq = dev->conf.channel->center_freq; rx_status.band = dev->conf.channel->band; rx_status.mactime = le64_to_cpu(entry->tsft); - rx_status.flag |= RX_FLAG_TSFT; + rx_status.flag |= RX_FLAG_MACTIME_MPDU; if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; @@ -240,7 +240,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +static void rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; @@ -321,8 +321,6 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) spin_unlock_irqrestore(&priv->lock, flags); rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); - - return 0; } void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam) @@ -687,7 +685,6 @@ static void rtl8180_beacon_work(struct work_struct *work) struct ieee80211_hw *dev = vif_priv->dev; struct ieee80211_mgmt *mgmt; struct sk_buff *skb; - int err = 0; /* don't overflow the tx ring */ if (ieee80211_queue_stopped(dev, 0)) @@ -708,8 +705,7 @@ static void rtl8180_beacon_work(struct work_struct *work) /* TODO: use actual beacon queue */ skb_set_queue_mapping(skb, 0); - err = rtl8180_tx(dev, skb); - WARN_ON(err); + rtl8180_tx(dev, skb); resched: /* diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c index 6b82cac37ee3..1e0be14d10d4 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -227,7 +227,7 @@ static void rtl8187_tx_cb(struct urb *urb) } } -static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct rtl8187_priv *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -241,7 +241,7 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { kfree_skb(skb); - return NETDEV_TX_OK; + return; } flags = skb->len; @@ -309,8 +309,6 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb) kfree_skb(skb); } usb_free_urb(urb); - - return NETDEV_TX_OK; } static void rtl8187_rx_cb(struct urb *urb) @@ -373,7 +371,7 @@ static void rtl8187_rx_cb(struct urb *urb) rx_status.rate_idx = rate; rx_status.freq = 
dev->conf.channel->center_freq; rx_status.band = dev->conf.channel->band; - rx_status.flag |= RX_FLAG_TSFT; + rx_status.flag |= RX_FLAG_MACTIME_MPDU; if (flags & RTL818X_RX_DESC_FLAG_CRC32_ERR) rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); @@ -871,23 +869,35 @@ static void rtl8187_work(struct work_struct *work) /* The RTL8187 returns the retry count through register 0xFFFA. In * addition, it appears to be a cumulative retry count, not the * value for the current TX packet. When multiple TX entries are - * queued, the retry count will be valid for the last one in the queue. - * The "error" should not matter for purposes of rate setting. */ + * waiting in the queue, the retry count will be the total for all. + * The "error" may matter for purposes of rate setting, but there is + * no other choice with this hardware. + */ struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, work.work); struct ieee80211_tx_info *info; struct ieee80211_hw *dev = priv->dev; static u16 retry; u16 tmp; + u16 avg_retry; + int length; mutex_lock(&priv->conf_mutex); tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA); + length = skb_queue_len(&priv->b_tx_status.queue); + if (unlikely(!length)) + length = 1; + if (unlikely(tmp < retry)) + tmp = retry; + avg_retry = (tmp - retry) / length; while (skb_queue_len(&priv->b_tx_status.queue) > 0) { struct sk_buff *old_skb; old_skb = skb_dequeue(&priv->b_tx_status.queue); info = IEEE80211_SKB_CB(old_skb); - info->status.rates[0].count = tmp - retry + 1; + info->status.rates[0].count = avg_retry + 1; + if (info->status.rates[0].count > RETRY_COUNT) + info->flags &= ~IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(dev, old_skb); } retry = tmp; @@ -933,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev) rtl818x_iowrite32(priv, &priv->map->TX_CONF, RTL818X_TX_CONF_HW_SEQNUM | RTL818X_TX_CONF_DISREQQSIZE | - (7 << 8 /* short retry limit */) | - (7 << 0 /* long retry limit */) | + (RETRY_COUNT << 8 /* short retry limit */) | + (RETRY_COUNT << 0 /* long retry limit */) | (7 << 21 /* MAX TX DMA */)); rtl8187_init_urbs(dev); rtl8187b_init_status_urb(dev); @@ -1378,6 +1388,9 @@ static int __devinit rtl8187_probe(struct usb_interface *intf, dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_RX_INCLUDES_FCS; + /* Initialize rate-control variables */ + dev->max_rates = 1; + dev->max_rate_tries = RETRY_COUNT; eeprom.data = dev; eeprom.register_read = rtl8187_eeprom_register_read; diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h index 0d7b1423f77b..f1cc90751dbf 100644 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h @@ -35,6 +35,8 @@ #define RFKILL_MASK_8187_89_97 0x2 #define RFKILL_MASK_8198 0x4 +#define RETRY_COUNT 7 + struct rtl8187_rx_info { struct urb *urb; struct ieee80211_hw *dev; diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig index 7f6573f7f470..ce49e0ce7cad 100644 --- a/drivers/net/wireless/rtlwifi/Kconfig +++ b/drivers/net/wireless/rtlwifi/Kconfig @@ -1,15 +1,33 @@ config RTL8192CE - tristate "Realtek RTL8192CE/RTL8188SE Wireless Network Adapter" - depends on MAC80211 && EXPERIMENTAL + tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter" + depends on MAC80211 && PCI && EXPERIMENTAL select FW_LOADER select RTLWIFI + select RTL8192C_COMMON ---help--- This is the driver for Realtek 
RTL8192CE/RTL8188CE 802.11n PCIe wireless network adapters. If you choose to build it as a module, it will be called rtl8192ce +config RTL8192CU + tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter" + depends on MAC80211 && USB && EXPERIMENTAL + select FW_LOADER + select RTLWIFI + select RTL8192C_COMMON + ---help--- + This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB + wireless network adapters. + + If you choose to build it as a module, it will be called rtl8192cu + config RTLWIFI tristate - depends on RTL8192CE + depends on RTL8192CE || RTL8192CU + default m + +config RTL8192C_COMMON + tristate + depends on RTL8192CE || RTL8192CU default m diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile index 2a7a4384f8ee..ec9393f24799 100644 --- a/drivers/net/wireless/rtlwifi/Makefile +++ b/drivers/net/wireless/rtlwifi/Makefile @@ -5,9 +5,22 @@ rtlwifi-objs := \ core.o \ debug.o \ efuse.o \ - pci.o \ ps.o \ rc.o \ regd.o +rtl8192c_common-objs += \ + +ifneq ($(CONFIG_PCI),) +rtlwifi-objs += pci.o +endif + +ifneq ($(CONFIG_USB),) +rtlwifi-objs += usb.o +endif + +obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/ obj-$(CONFIG_RTL8192CE) += rtl8192ce/ +obj-$(CONFIG_RTL8192CU) += rtl8192cu/ + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c index cf0b73e51fc2..bb0c781f4a1b 100644 --- a/drivers/net/wireless/rtlwifi/base.c +++ b/drivers/net/wireless/rtlwifi/base.c @@ -144,7 +144,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, ht_cap->mcs.rx_mask[1] = 0xFF; ht_cap->mcs.rx_mask[4] = 0x01; - ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15; + ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15); } else if (get_rf_type(rtlphy) == RF_1T1R) { RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n")); @@ -153,7 +153,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw, ht_cap->mcs.rx_mask[1] = 0x00; ht_cap->mcs.rx_mask[4] = 0x01; - ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7; + ht_cap->mcs.rx_highest = cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS7); } } @@ -283,13 +283,7 @@ int rtl_init_core(struct ieee80211_hw *hw) rtlmac->hw = hw; /* <2> rate control register */ - if (rtl_rate_control_register()) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - ("rtl: Unable to register rtl_rc," - "use default RC !!\n")); - } else { - hw->rate_control_algorithm = "rtl_rc"; - } + hw->rate_control_algorithm = "rtl_rc"; /* * <3> init CRDA must come after init @@ -325,8 +319,6 @@ int rtl_init_core(struct ieee80211_hw *hw) void rtl_deinit_core(struct ieee80211_hw *hw) { - /*RC*/ - rtl_rate_control_unregister(); } void rtl_init_rx_config(struct ieee80211_hw *hw) @@ -399,21 +391,21 @@ static void _rtl_query_protection_mode(struct ieee80211_hw *hw, u8 rate_flag = info->control.rates[0].flags; /* Common Settings */ - tcb_desc->b_rts_stbc = false; - tcb_desc->b_cts_enable = false; + tcb_desc->rts_stbc = false; + tcb_desc->cts_enable = false; tcb_desc->rts_sc = 0; - tcb_desc->b_rts_bw = false; - tcb_desc->b_rts_use_shortpreamble = false; - tcb_desc->b_rts_use_shortgi = false; + tcb_desc->rts_bw = false; + tcb_desc->rts_use_shortpreamble = false; + tcb_desc->rts_use_shortgi = false; if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) { /* Use CTS-to-SELF in protection mode. 
*/ - tcb_desc->b_rts_enable = true; - tcb_desc->b_cts_enable = true; + tcb_desc->rts_enable = true; + tcb_desc->cts_enable = true; tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { /* Use RTS-CTS in protection mode. */ - tcb_desc->b_rts_enable = true; + tcb_desc->rts_enable = true; tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M]; } @@ -429,7 +421,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, if (mac->opmode == NL80211_IFTYPE_STATION) tcb_desc->ratr_index = 0; else if (mac->opmode == NL80211_IFTYPE_ADHOC) { - if (tcb_desc->b_multicast || tcb_desc->b_broadcast) { + if (tcb_desc->multicast || tcb_desc->broadcast) { tcb_desc->hw_rate = rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M]; tcb_desc->use_driver_rate = 1; @@ -439,7 +431,7 @@ static void _rtl_txrate_selectmode(struct ieee80211_hw *hw, } } - if (rtlpriv->dm.b_useramask) { + if (rtlpriv->dm.useramask) { /* TODO we will differentiate adhoc and station futrue */ tcb_desc->mac_id = 0; @@ -461,19 +453,19 @@ static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - tcb_desc->b_packet_bw = false; + tcb_desc->packet_bw = false; if (!mac->bw_40 || !mac->ht_enable) return; - if (tcb_desc->b_multicast || tcb_desc->b_broadcast) + if (tcb_desc->multicast || tcb_desc->broadcast) return; /*use legency rate, shall use 20MHz */ if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M]) return; - tcb_desc->b_packet_bw = true; + tcb_desc->packet_bw = true; } static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw) @@ -498,7 +490,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); struct ieee80211_rate *txrate; - u16 fc = le16_to_cpu(hdr->frame_control); + __le16 fc = hdr->frame_control; memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc)); @@ -545,9 +537,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, } if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) - tcb_desc->b_multicast = 1; + tcb_desc->multicast = 1; else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr))) - tcb_desc->b_broadcast = 1; + tcb_desc->broadcast = 1; _rtl_txrate_selectmode(hw, tcb_desc); _rtl_query_bandwidth_mode(hw, tcb_desc); @@ -570,7 +562,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_priv *rtlpriv = rtl_priv(hw); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); - u16 fc = le16_to_cpu(hdr->frame_control); + __le16 fc = hdr->frame_control; if (ieee80211_is_auth(fc)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); @@ -587,7 +579,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); struct rtl_priv *rtlpriv = rtl_priv(hw); - u16 fc = le16_to_cpu(hdr->frame_control); + __le16 fc = hdr->frame_control; u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN)); u8 category; @@ -632,7 +624,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) struct rtl_priv *rtlpriv = rtl_priv(hw); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - u16 fc = le16_to_cpu(hdr->frame_control); + __le16 fc = hdr->frame_control; u16 ether_type; u8 mac_hdr_len = 
ieee80211_get_hdrlen_from_skb(skb); const struct iphdr *ip; @@ -646,7 +638,6 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len + SNAP_SIZE + PROTOC_TYPE_SIZE); ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE); - ether_type = ntohs(ether_type); if (ETH_P_IP == ether_type) { if (IPPROTO_UDP == ip->protocol) { @@ -690,7 +681,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx) } return true; - } else if (0x86DD == ether_type) { + } else if (ETH_P_IPV6 == ether_type) { + /* IPv6 */ return true; } @@ -777,10 +769,10 @@ void rtl_watchdog_wq_callback(void *data) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - bool b_busytraffic = false; - bool b_higher_busytraffic = false; - bool b_higher_busyrxtraffic = false; - bool b_higher_busytxtraffic = false; + bool busytraffic = false; + bool higher_busytraffic = false; + bool higher_busyrxtraffic = false; + bool higher_busytxtraffic = false; u8 idx = 0; u32 rx_cnt_inp4eriod = 0; @@ -788,7 +780,7 @@ void rtl_watchdog_wq_callback(void *data) u32 aver_rx_cnt_inperiod = 0; u32 aver_tx_cnt_inperiod = 0; - bool benter_ps = false; + bool enter_ps = false; if (is_hal_stop(rtlhal)) return; @@ -832,29 +824,29 @@ void rtl_watchdog_wq_callback(void *data) /* (2) check traffic busy */ if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100) - b_busytraffic = true; + busytraffic = true; /* Higher Tx/Rx data. */ if (aver_rx_cnt_inperiod > 4000 || aver_tx_cnt_inperiod > 4000) { - b_higher_busytraffic = true; + higher_busytraffic = true; /* Extremely high Rx data. */ if (aver_rx_cnt_inperiod > 5000) - b_higher_busyrxtraffic = true; + higher_busyrxtraffic = true; else - b_higher_busytxtraffic = false; + higher_busytxtraffic = false; } if (((rtlpriv->link_info.num_rx_inperiod + rtlpriv->link_info.num_tx_inperiod) > 8) || (rtlpriv->link_info.num_rx_inperiod > 2)) - benter_ps = false; + enter_ps = false; else - benter_ps = true; + enter_ps = true; /* LeisurePS only work in infra mode. 
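/*
 * Editor's note -- not part of the patch. A standalone sketch of the traffic
 * classification the reworked rtl_watchdog_wq_callback() applies once the b_
 * prefixes are dropped: average per-period packet counts mark the link as
 * busy, and only a very quiet period allows leisure power save to be entered.
 * The thresholds are the ones visible in the hunk; the function and struct
 * names here are illustrative, not driver symbols.
 */
#include <stdbool.h>
#include <stdio.h>

struct traffic_verdict {
	bool busytraffic;          /* either direction above ~100 pkts/period */
	bool higher_busytraffic;   /* either direction above ~4000 pkts/period */
	bool higher_busyrxtraffic; /* rx alone above ~5000 pkts/period */
	bool enter_ps;             /* quiet enough to enter leisure PS */
};

static struct traffic_verdict classify_period(unsigned aver_rx, unsigned aver_tx,
					      unsigned num_rx, unsigned num_tx)
{
	struct traffic_verdict v = { false, false, false, false };

	if (aver_rx > 100 || aver_tx > 100)
		v.busytraffic = true;

	if (aver_rx > 4000 || aver_tx > 4000) {
		v.higher_busytraffic = true;
		if (aver_rx > 5000)
			v.higher_busyrxtraffic = true;
	}

	/* Enter PS only when almost nothing moved in the last period. */
	v.enter_ps = !((num_rx + num_tx) > 8 || num_rx > 2);

	return v;
}

int main(void)
{
	struct traffic_verdict v = classify_period(120, 30, 12, 40);

	printf("busy=%d higher=%d higher_rx=%d enter_ps=%d\n",
	       v.busytraffic, v.higher_busytraffic,
	       v.higher_busyrxtraffic, v.enter_ps);
	return 0;
}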
*/ - if (benter_ps) + if (enter_ps) rtl_lps_enter(hw); else rtl_lps_leave(hw); @@ -863,9 +855,9 @@ void rtl_watchdog_wq_callback(void *data) rtlpriv->link_info.num_rx_inperiod = 0; rtlpriv->link_info.num_tx_inperiod = 0; - rtlpriv->link_info.b_busytraffic = b_busytraffic; - rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic; - rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic; + rtlpriv->link_info.busytraffic = busytraffic; + rtlpriv->link_info.higher_busytraffic = higher_busytraffic; + rtlpriv->link_info.higher_busyrxtraffic = higher_busyrxtraffic; } @@ -945,11 +937,16 @@ MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); static int __init rtl_core_module_init(void) { + if (rtl_rate_control_register()) + printk(KERN_ERR "rtlwifi: Unable to register rtl_rc," + "use default RC !!\n"); return 0; } static void __exit rtl_core_module_exit(void) { + /*RC*/ + rtl_rate_control_unregister(); } module_init(rtl_core_module_init); diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h index 3de5a14745f1..043045342bc7 100644 --- a/drivers/net/wireless/rtlwifi/base.h +++ b/drivers/net/wireless/rtlwifi/base.h @@ -30,6 +30,7 @@ #define __RTL_BASE_H__ #define RTL_DUMMY_OFFSET 0 +#define RTL_RX_DESC_SIZE 24 #define RTL_DUMMY_UNIT 8 #define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT) #define RTL_TX_DESC_SIZE 32 @@ -52,46 +53,22 @@ #define FRAME_OFFSET_SEQUENCE 22 #define FRAME_OFFSET_ADDRESS4 24 -#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \ - WRITEEF2BYTE(_hdr, _val) -#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \ - WRITEEF1BYTE(_hdr, _val) -#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \ - SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val) -#define SET_80211_HDR_TO_DS(_hdr, _val) \ - SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val) #define SET_80211_PS_POLL_AID(_hdr, _val) \ - WRITEEF2BYTE(((u8 *)(_hdr)) + 2, _val) + (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val)) #define SET_80211_PS_POLL_BSSID(_hdr, _val) \ - CP_MACADDR(((u8 *)(_hdr)) + 4, (u8 *)(_val)) + memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN) #define SET_80211_PS_POLL_TA(_hdr, _val) \ - CP_MACADDR(((u8 *)(_hdr)) + 10, (u8 *)(_val)) + memcpy(((u8 *)(_hdr)) + 10, (u8 *)(_val), ETH_ALEN) #define SET_80211_HDR_DURATION(_hdr, _val) \ - WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_DURATION, _val) + (*(u16 *)((u8 *)(_hdr) + FRAME_OFFSET_DURATION) = le16_to_cpu(_val)) #define SET_80211_HDR_ADDRESS1(_hdr, _val) \ - CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val)) + memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val), ETH_ALEN) #define SET_80211_HDR_ADDRESS2(_hdr, _val) \ - CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val)) + memcpy((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val), ETH_ALEN) #define SET_80211_HDR_ADDRESS3(_hdr, _val) \ - CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val)) -#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \ - WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val) - -#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \ - WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val) -#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \ - WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val) -#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \ - WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val) -#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \ - READEF2BYTE(((u8 *)(__phdr)) + 34) -#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \ - WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val) -#define 
MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \ - SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \ - (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val)))) + memcpy((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val), ETH_ALEN) int rtl_init_core(struct ieee80211_hw *hw); void rtl_deinit_core(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c index d6a924a05654..e4f4aee8f298 100644 --- a/drivers/net/wireless/rtlwifi/core.c +++ b/drivers/net/wireless/rtlwifi/core.c @@ -82,7 +82,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw) mutex_unlock(&rtlpriv->locks.conf_mutex); } -static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -97,11 +97,10 @@ static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) rtlpriv->intf_ops->adapter_tx(hw, skb); - return NETDEV_TX_OK; + return; err_free: dev_kfree_skb_any(skb); - return NETDEV_TX_OK; } static int rtl_op_add_interface(struct ieee80211_hw *hw, @@ -434,9 +433,9 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue, aci = _rtl_get_hal_qnum(queue); mac->ac[aci].aifs = param->aifs; - mac->ac[aci].cw_min = param->cw_min; - mac->ac[aci].cw_max = param->cw_max; - mac->ac[aci].tx_op = param->txop; + mac->ac[aci].cw_min = cpu_to_le16(param->cw_min); + mac->ac[aci].cw_max = cpu_to_le16(param->cw_max); + mac->ac[aci].tx_op = cpu_to_le16(param->txop); memcpy(&mac->edca_param[aci], param, sizeof(*param)); rtlpriv->cfg->ops->set_qos(hw, aci); return 0; @@ -552,6 +551,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE, ("BSS_CHANGED_HT\n")); + rcu_read_lock(); sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { @@ -564,6 +564,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, mac->current_ampdu_factor = sta->ht_cap.ampdu_factor; } + rcu_read_unlock(); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY, (u8 *) (&mac->max_mss_density)); @@ -615,6 +616,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, else mac->mode = WIRELESS_MODE_G; + rcu_read_lock(); sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { @@ -649,6 +651,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, */ } } + rcu_read_unlock(); /*mac80211 just give us CCK rates any time *So we add G rate in basic rates when @@ -666,7 +669,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE, (u8 *) (&basic_rates)); - if (rtlpriv->dm.b_useramask) + if (rtlpriv->dm.useramask) rtlpriv->cfg->ops->update_rate_mask(hw, 0); else rtlpriv->cfg->ops->update_rate_table(hw); @@ -681,7 +684,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, */ if (changed & BSS_CHANGED_ASSOC) { if (bss_conf->assoc) { - if (ppsc->b_fwctrl_lps) { + if (ppsc->fwctrl_lps) { u8 mstatus = RT_MEDIA_CONNECT; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_JOINBSSRPT, @@ -689,7 +692,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, ppsc->report_linked = true; } } else { - if (ppsc->b_fwctrl_lps) { + if (ppsc->fwctrl_lps) { u8 mstatus = RT_MEDIA_DISCONNECT; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_JOINBSSRPT, @@ -748,7 +751,8 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw, static int rtl_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum 
ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 * ssn) + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -817,7 +821,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw) /* fix fwlps issue */ rtlpriv->cfg->ops->set_network_type(hw, mac->opmode); - if (rtlpriv->dm.b_useramask) + if (rtlpriv->dm.useramask) rtlpriv->cfg->ops->update_rate_mask(hw, 0); else rtlpriv->cfg->ops->update_rate_table(hw); diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h index 08bdec2ceda4..e4aa8687408c 100644 --- a/drivers/net/wireless/rtlwifi/debug.h +++ b/drivers/net/wireless/rtlwifi/debug.h @@ -105,6 +105,7 @@ #define COMP_MAC80211 BIT(26) #define COMP_REGD BIT(27) #define COMP_CHAN BIT(28) +#define COMP_USB BIT(29) /*-------------------------------------------------------------- Define the rt_print components diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c index 62876cd5c41a..4f92cba6810a 100644 --- a/drivers/net/wireless/rtlwifi/efuse.c +++ b/drivers/net/wireless/rtlwifi/efuse.c @@ -1169,21 +1169,3 @@ static u8 efuse_calculate_word_cnts(u8 word_en) return word_cnts; } -void efuse_reset_loader(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u16 tmp_u2b; - - tmp_u2b = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]); - rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN], - (tmp_u2b & ~(BIT(12)))); - udelay(10000); - rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN], - (tmp_u2b | BIT(12))); - udelay(10000); -} - -bool efuse_program_map(struct ieee80211_hw *hw, char *p_filename, u8 tabletype) -{ - return true; -} diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h index 2d39a4df181b..47774dd4c2a6 100644 --- a/drivers/net/wireless/rtlwifi/efuse.h +++ b/drivers/net/wireless/rtlwifi/efuse.h @@ -117,8 +117,5 @@ extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw); extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); -extern bool efuse_program_map(struct ieee80211_hw *hw, - char *p_filename, u8 tabletype); -extern void efuse_reset_loader(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 1758d4463247..9cd7703c2a30 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -50,7 +50,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; ppsc->reg_rfps_level = 0; - ppsc->b_support_aspm = 0; + ppsc->support_aspm = 0; /*Update PCI ASPM setting */ ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm; @@ -115,29 +115,29 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw) switch (rtlpci->const_support_pciaspm) { case 0:{ /*Not support ASPM. */ - bool b_support_aspm = false; - ppsc->b_support_aspm = b_support_aspm; + bool support_aspm = false; + ppsc->support_aspm = support_aspm; break; } case 1:{ /*Support ASPM. 
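/*
 * Editor's note -- not part of the patch. A standalone sketch of the ASPM
 * policy selection in _rtl_pci_update_default_setting() after the rename from
 * b_support_aspm to support_aspm: a module-level knob chooses between "never",
 * "always (including the register backdoor writes)", and "only when the
 * chipset is known to cope", which in the hunk means an Intel PCI bridge.
 * The identifiers below are illustrative stand-ins for the rtl_ps_ctl fields.
 */
#include <stdbool.h>
#include <stdio.h>

enum aspm_knob {
	ASPM_DISABLED   = 0, /* case 0: never enable ASPM */
	ASPM_ENABLED    = 1, /* case 1: enable ASPM and the backdoor writes */
	ASPM_BY_CHIPSET = 2, /* case 2: enable only behind a trusted bridge */
};

struct aspm_policy {
	bool support_aspm;
	bool support_backdoor;
};

static struct aspm_policy pick_aspm_policy(enum aspm_knob knob, bool intel_bridge)
{
	struct aspm_policy p = { false, false };

	switch (knob) {
	case ASPM_DISABLED:
		break;
	case ASPM_ENABLED:
		p.support_aspm = true;
		p.support_backdoor = true;
		break;
	case ASPM_BY_CHIPSET:
		/* The hunk only whitelists Intel PCI bridges here. */
		if (intel_bridge)
			p.support_aspm = true;
		break;
	}
	return p;
}

int main(void)
{
	struct aspm_policy p = pick_aspm_policy(ASPM_BY_CHIPSET, true);

	printf("aspm=%d backdoor=%d\n", p.support_aspm, p.support_backdoor);
	return 0;
}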
*/ - bool b_support_aspm = true; - bool b_support_backdoor = true; - ppsc->b_support_aspm = b_support_aspm; + bool support_aspm = true; + bool support_backdoor = true; + ppsc->support_aspm = support_aspm; /*if(priv->oem_id == RT_CID_TOSHIBA && !priv->ndis_adapter.amd_l1_patch) - b_support_backdoor = false; */ + support_backdoor = false; */ - ppsc->b_support_backdoor = b_support_backdoor; + ppsc->support_backdoor = support_backdoor; break; } case 2: /*ASPM value set by chipset. */ if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) { - bool b_support_aspm = true; - ppsc->b_support_aspm = b_support_aspm; + bool support_aspm = true; + ppsc->support_aspm = support_aspm; } break; default: @@ -476,9 +476,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio) skb = __skb_dequeue(&ring->queue); pci_unmap_single(rtlpci->pdev, - le32_to_cpu(rtlpriv->cfg->ops-> + rtlpriv->cfg->ops-> get_desc((u8 *) entry, true, - HW_DESC_TXBUFF_ADDR)), + HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE, @@ -557,7 +557,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) return; } else { struct ieee80211_hdr *hdr; - u16 fc; + __le16 fc; struct sk_buff *new_skb = NULL; rtlpriv->cfg->ops->query_rx_desc(hw, &stats, @@ -583,9 +583,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) */ hdr = (struct ieee80211_hdr *)(skb->data); - fc = le16_to_cpu(hdr->frame_control); + fc = hdr->frame_control; - if (!stats.b_crc) { + if (!stats.crc) { memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); @@ -666,7 +666,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) } done: - bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb)); + bufferaddress = (u32)(*((dma_addr_t *) skb->cb)); tmp_one = 1; rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false, HW_DESC_RXBUFF_ADDR, @@ -690,75 +690,6 @@ done: } -void _rtl_pci_tx_interrupt(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - int prio; - - for (prio = 0; prio < RTL_PCI_MAX_TX_QUEUE_COUNT; prio++) { - struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio]; - - while (skb_queue_len(&ring->queue)) { - struct rtl_tx_desc *entry = &ring->desc[ring->idx]; - struct sk_buff *skb; - struct ieee80211_tx_info *info; - u8 own; - - /* - *beacon packet will only use the first - *descriptor defautly, and the own may not - *be cleared by the hardware, and - *beacon will free in prepare beacon - */ - if (prio == BEACON_QUEUE || prio == TXCMD_QUEUE || - prio == HCCA_QUEUE) - break; - - own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)entry, - true, - HW_DESC_OWN); - - if (own) - break; - - skb = __skb_dequeue(&ring->queue); - pci_unmap_single(rtlpci->pdev, - le32_to_cpu(rtlpriv->cfg->ops-> - get_desc((u8 *) entry, - true, - HW_DESC_TXBUFF_ADDR)), - skb->len, PCI_DMA_TODEVICE); - - ring->idx = (ring->idx + 1) % ring->entries; - - info = IEEE80211_SKB_CB(skb); - ieee80211_tx_info_clear_status(info); - - info->flags |= IEEE80211_TX_STAT_ACK; - /*info->status.rates[0].count = 1; */ - - ieee80211_tx_status_irqsafe(hw, skb); - - if ((ring->entries - skb_queue_len(&ring->queue)) - == 2 && prio != BEACON_QUEUE) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - ("more desc left, wake " - "skb_queue@%d,ring->idx = %d," - "skb_queue_len = 0x%d\n", - prio, ring->idx, - skb_queue_len(&ring->queue))); - - ieee80211_wake_queue(hw, - skb_get_queue_mapping - (skb)); - } - - skb = NULL; - } - } -} - static irqreturn_t _rtl_pci_interrupt(int irq, void 
*dev_id) { struct ieee80211_hw *hw = dev_id; @@ -959,17 +890,17 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw, rtlhal->hw = hw; rtlpci->pdev = pdev; - ppsc->b_inactiveps = false; - ppsc->b_leisure_ps = true; - ppsc->b_fwctrl_lps = true; - ppsc->b_reg_fwctrl_lps = 3; + ppsc->inactiveps = false; + ppsc->leisure_ps = true; + ppsc->fwctrl_lps = true; + ppsc->reg_fwctrl_lps = 3; ppsc->reg_max_lps_awakeintvl = 5; - if (ppsc->b_reg_fwctrl_lps == 1) + if (ppsc->reg_fwctrl_lps == 1) ppsc->fwctrl_psmode = FW_PS_MIN_MODE; - else if (ppsc->b_reg_fwctrl_lps == 2) + else if (ppsc->reg_fwctrl_lps == 2) ppsc->fwctrl_psmode = FW_PS_MAX_MODE; - else if (ppsc->b_reg_fwctrl_lps == 3) + else if (ppsc->reg_fwctrl_lps == 3) ppsc->fwctrl_psmode = FW_PS_DTIM_MODE; /*Tx/Rx related var */ @@ -1024,9 +955,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw, ("queue:%d, ring_addr:%p\n", prio, ring)); for (i = 0; i < entries; i++) { - nextdescaddress = cpu_to_le32((u32) dma + - ((i + 1) % entries) * - sizeof(*ring)); + nextdescaddress = (u32) dma + ((i + 1) % entries) * + sizeof(*ring); rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]), true, HW_DESC_TX_NEXTDESC_ADDR, @@ -1090,7 +1020,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw) rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); - bufferaddress = cpu_to_le32(*((dma_addr_t *)skb->cb)); + bufferaddress = (u32)(*((dma_addr_t *)skb->cb)); rtlpriv->cfg->ops->set_desc((u8 *)entry, false, HW_DESC_RXBUFF_ADDR, (u8 *)&bufferaddress); @@ -1121,9 +1051,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, struct sk_buff *skb = __skb_dequeue(&ring->queue); pci_unmap_single(rtlpci->pdev, - le32_to_cpu(rtlpriv->cfg-> + rtlpriv->cfg-> ops->get_desc((u8 *) entry, true, - HW_DESC_TXBUFF_ADDR)), + HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; @@ -1255,11 +1185,11 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) __skb_dequeue(&ring->queue); pci_unmap_single(rtlpci->pdev, - le32_to_cpu(rtlpriv->cfg->ops-> + rtlpriv->cfg->ops-> get_desc((u8 *) entry, true, - HW_DESC_TXBUFF_ADDR)), + HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; @@ -1273,7 +1203,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) return 0; } -unsigned int _rtl_mac_to_hwqueue(u16 fc, +static unsigned int _rtl_mac_to_hwqueue(__le16 fc, unsigned int mac80211_queue_index) { unsigned int hw_queue_index; @@ -1312,7 +1242,7 @@ out: return hw_queue_index; } -int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -1323,7 +1253,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb) unsigned int queue_index, hw_queue; unsigned long flags; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); - u16 fc = le16_to_cpu(hdr->frame_control); + __le16 fc = hdr->frame_control; u8 *pda_addr = hdr->addr1; struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); /*ssn */ @@ -1429,7 +1359,7 @@ int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb) return 0; } -void rtl_pci_deinit(struct ieee80211_hw *hw) +static void rtl_pci_deinit(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -1444,7 +1374,7 @@ void rtl_pci_deinit(struct ieee80211_hw *hw) } -int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev 
*pdev) +static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) { struct rtl_priv *rtlpriv = rtl_priv(hw); int err; @@ -1461,7 +1391,7 @@ int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) return 1; } -int rtl_pci_start(struct ieee80211_hw *hw) +static int rtl_pci_start(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1496,7 +1426,7 @@ int rtl_pci_start(struct ieee80211_hw *hw) return 0; } -void rtl_pci_stop(struct ieee80211_hw *hw) +static void rtl_pci_stop(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -1547,13 +1477,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, struct pci_dev *bridge_pdev = pdev->bus->self; u16 venderid; u16 deviceid; - u8 revisionid; u16 irqline; u8 tmp; venderid = pdev->vendor; deviceid = pdev->device; - pci_read_config_byte(pdev, 0x8, &revisionid); pci_read_config_word(pdev, 0x3C, &irqline); if (deviceid == RTL_PCI_8192_DID || @@ -1564,7 +1492,7 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, deviceid == RTL_PCI_8173_DID || deviceid == RTL_PCI_8172_DID || deviceid == RTL_PCI_8171_DID) { - switch (revisionid) { + switch (pdev->revision) { case RTL_PCI_REVISION_ID_8192PCIE: RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("8192 PCI-E is found - " @@ -1838,7 +1766,7 @@ fail3: ieee80211_free_hw(hw); if (rtlpriv->io.pci_mem_start != 0) - pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start); + pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); fail2: pci_release_regions(pdev); @@ -1888,7 +1816,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev) } if (rtlpriv->io.pci_mem_start != 0) { - pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start); + pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); pci_release_regions(pdev); } diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h index d36a66939958..0caa81429726 100644 --- a/drivers/net/wireless/rtlwifi/pci.h +++ b/drivers/net/wireless/rtlwifi/pci.h @@ -244,34 +244,34 @@ int rtl_pci_resume(struct pci_dev *pdev); static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr) { - return 0xff & readb((u8 *) rtlpriv->io.pci_mem_start + addr); + return readb((u8 __iomem *) rtlpriv->io.pci_mem_start + addr); } static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr) { - return readw((u8 *) rtlpriv->io.pci_mem_start + addr); + return readw((u8 __iomem *) rtlpriv->io.pci_mem_start + addr); } static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr) { - return readl((u8 *) rtlpriv->io.pci_mem_start + addr); + return readl((u8 __iomem *) rtlpriv->io.pci_mem_start + addr); } static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val) { - writeb(val, (u8 *) rtlpriv->io.pci_mem_start + addr); + writeb(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr); } static inline void pci_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val) { - writew(val, (u8 *) rtlpriv->io.pci_mem_start + addr); + writew(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr); } static inline void pci_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val) { - writel(val, (u8 *) rtlpriv->io.pci_mem_start + addr); + writel(val, (u8 __iomem *) rtlpriv->io.pci_mem_start + addr); } static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val) diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index d2326c13449e..6b7e217b6b89 100644 --- 
a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -86,7 +86,7 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); enum rf_pwrstate rtstate; - bool b_actionallowed = false; + bool actionallowed = false; u16 rfwait_cnt = 0; unsigned long flag; @@ -139,13 +139,13 @@ no_protect: ppsc->rfoff_reason &= (~changesource); if ((changesource == RF_CHANGE_BY_HW) && - (ppsc->b_hwradiooff == true)) { - ppsc->b_hwradiooff = false; + (ppsc->hwradiooff == true)) { + ppsc->hwradiooff = false; } if (!ppsc->rfoff_reason) { ppsc->rfoff_reason = 0; - b_actionallowed = true; + actionallowed = true; } break; @@ -153,17 +153,17 @@ no_protect: case ERFOFF: if ((changesource == RF_CHANGE_BY_HW) - && (ppsc->b_hwradiooff == false)) { - ppsc->b_hwradiooff = true; + && (ppsc->hwradiooff == false)) { + ppsc->hwradiooff = true; } ppsc->rfoff_reason |= changesource; - b_actionallowed = true; + actionallowed = true; break; case ERFSLEEP: ppsc->rfoff_reason |= changesource; - b_actionallowed = true; + actionallowed = true; break; default: @@ -172,7 +172,7 @@ no_protect: break; } - if (b_actionallowed) + if (actionallowed) rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset); if (!protect_or_not) { @@ -181,7 +181,7 @@ no_protect: spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); } - return b_actionallowed; + return actionallowed; } EXPORT_SYMBOL(rtl_ps_set_rf_state); @@ -191,7 +191,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - ppsc->b_swrf_processing = true; + ppsc->swrf_processing = true; if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) { if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && @@ -213,7 +213,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) } } - ppsc->b_swrf_processing = false; + ppsc->swrf_processing = false; } void rtl_ips_nic_off_wq_callback(void *data) @@ -239,13 +239,13 @@ void rtl_ips_nic_off_wq_callback(void *data) if (rtlpriv->sec.being_setkey) return; - if (ppsc->b_inactiveps) { + if (ppsc->inactiveps) { rtstate = ppsc->rfpwr_state; /* *Do not enter IPS in the following conditions: *(1) RF is already OFF or Sleep - *(2) b_swrf_processing (indicates the IPS is still under going) + *(2) swrf_processing (indicates the IPS is still under going) *(3) Connectted (only disconnected can trigger IPS) *(4) IBSS (send Beacon) *(5) AP mode (send Beacon) @@ -253,14 +253,14 @@ void rtl_ips_nic_off_wq_callback(void *data) */ if (rtstate == ERFON && - !ppsc->b_swrf_processing && + !ppsc->swrf_processing && (mac->link_state == MAC80211_NOLINK) && !mac->act_scanning) { RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("IPSEnter(): Turn off RF.\n")); ppsc->inactive_pwrstate = ERFOFF; - ppsc->b_in_powersavemode = true; + ppsc->in_powersavemode = true; /*rtl_pci_reset_trx_ring(hw); */ _rtl_ps_inactive_ps(hw); @@ -290,15 +290,15 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw) spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags); - if (ppsc->b_inactiveps) { + if (ppsc->inactiveps) { rtstate = ppsc->rfpwr_state; if (rtstate != ERFON && - !ppsc->b_swrf_processing && + !ppsc->swrf_processing && ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) { ppsc->inactive_pwrstate = ERFON; - ppsc->b_in_powersavemode = false; + ppsc->in_powersavemode = false; _rtl_ps_inactive_ps(hw); } @@ -370,9 +370,9 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) * mode and set 
RPWM to turn RF on. */ - if ((ppsc->b_fwctrl_lps) && (ppsc->b_leisure_ps) && + if ((ppsc->fwctrl_lps) && (ppsc->leisure_ps) && ppsc->report_linked) { - bool b_fw_current_inps; + bool fw_current_inps; if (ppsc->dot11_psmode == EACTIVE) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, ("FW LPS leave ps_mode:%x\n", @@ -385,11 +385,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, (u8 *) (&fw_pwrmode)); - b_fw_current_inps = false; + fw_current_inps = false; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, - (u8 *) (&b_fw_current_inps)); + (u8 *) (&fw_current_inps)); } else { if (rtl_get_fwlps_doze(hw)) { @@ -398,10 +398,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode) ppsc->fwctrl_psmode)); rpwm_val = 0x02; /* RF off */ - b_fw_current_inps = true; + fw_current_inps = true; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, - (u8 *) (&b_fw_current_inps)); + (u8 *) (&fw_current_inps)); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_H2C_FW_PWRMODE, (u8 *) (&ppsc->fwctrl_psmode)); @@ -425,13 +425,13 @@ void rtl_lps_enter(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); unsigned long flag; - if (!(ppsc->b_fwctrl_lps && ppsc->b_leisure_ps)) + if (!(ppsc->fwctrl_lps && ppsc->leisure_ps)) return; if (rtlpriv->sec.being_setkey) return; - if (rtlpriv->link_info.b_busytraffic) + if (rtlpriv->link_info.busytraffic) return; /*sleep after linked 10s, to let DHCP and 4-way handshake ok enough!! */ @@ -446,7 +446,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw) spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); - if (ppsc->b_leisure_ps) { + if (ppsc->leisure_ps) { /* Idle for a while if we connect to AP a while ago. */ if (mac->cnt_after_linked >= 2) { if (ppsc->dot11_psmode == EACTIVE) { @@ -470,7 +470,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw) spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); - if (ppsc->b_fwctrl_lps && ppsc->b_leisure_ps) { + if (ppsc->fwctrl_lps && ppsc->leisure_ps) { if (ppsc->dot11_psmode != EACTIVE) { /*FIX ME */ diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/Makefile b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile new file mode 100644 index 000000000000..aee42d7ae8a2 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192c/Makefile @@ -0,0 +1,9 @@ +rtl8192c-common-objs := \ + main.o \ + dm_common.o \ + fw_common.o \ + phy_common.o + +obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c-common.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c new file mode 100644 index 000000000000..bb023274414c --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c @@ -0,0 +1,1398 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
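/*
 * Editor's note -- not part of the patch. A standalone sketch of the guards
 * rtl_lps_enter() applies in the ps.c hunks above once the b_ prefixes are
 * gone: firmware-controlled leisure PS is only attempted when both fwctrl_lps
 * and leisure_ps are set, never while keys are being installed, never while
 * the watchdog reports busy traffic, and only after the interface has been
 * associated for a couple of watchdog periods (so DHCP and the 4-way
 * handshake can finish). Field and function names below are illustrative,
 * not the actual rtl_ps_ctl layout.
 */
#include <stdbool.h>
#include <stdio.h>

struct lps_state {
	bool fwctrl_lps;           /* firmware controls the LPS state machine */
	bool leisure_ps;           /* leisure PS enabled at all */
	bool being_setkey;         /* key install in flight, do not sleep */
	bool busytraffic;          /* watchdog marked the link as busy */
	unsigned cnt_after_linked; /* watchdog periods since association */
};

static bool lps_may_enter(const struct lps_state *s)
{
	if (!(s->fwctrl_lps && s->leisure_ps))
		return false;
	if (s->being_setkey)
		return false;
	if (s->busytraffic)
		return false;
	/* Give DHCP and the 4-way handshake time to finish after linking. */
	return s->cnt_after_linked >= 2;
}

int main(void)
{
	struct lps_state s = { true, true, false, false, 3 };

	printf("may enter LPS: %d\n", lps_may_enter(&s));
	return 0;
}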
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "dm_common.h" + +struct dig_t dm_digtable; +static struct ps_t dm_pstable; + +static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { + 0x7f8001fe, + 0x788001e2, + 0x71c001c7, + 0x6b8001ae, + 0x65400195, + 0x5fc0017f, + 0x5a400169, + 0x55400155, + 0x50800142, + 0x4c000130, + 0x47c0011f, + 0x43c0010f, + 0x40000100, + 0x3c8000f2, + 0x390000e4, + 0x35c000d7, + 0x32c000cb, + 0x300000c0, + 0x2d4000b5, + 0x2ac000ab, + 0x288000a2, + 0x26000098, + 0x24000090, + 0x22000088, + 0x20000080, + 0x1e400079, + 0x1c800072, + 0x1b00006c, + 0x19800066, + 0x18000060, + 0x16c0005b, + 0x15800056, + 0x14400051, + 0x1300004c, + 0x12000048, + 0x11000044, + 0x10000040, +}; + +static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { + {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, + {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, + {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, + {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, + {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, + {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, + {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, + {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, + {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, + {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, + {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, + {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, + {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, + {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, + {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, + {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, + {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, + {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, + {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, + {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, + {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, + {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, + {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, + {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, + {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, + {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, + {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, + {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, + {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, + {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, + {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, + {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, + {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} +}; + +static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { + {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, + {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, + {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, + {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, + {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, + {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, + {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, + {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, + 
{0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, + {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, + {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, + {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, + {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, + {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, + {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, + {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, + {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, + {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, + {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, + {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, + {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, + {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, + {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, + {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, + {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, + {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, + {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, + {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, + {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, + {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, + {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, + {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, + {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} +}; + +static void rtl92c_dm_diginit(struct ieee80211_hw *hw) +{ + dm_digtable.dig_enable_flag = true; + dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + dm_digtable.cur_igvalue = 0x20; + dm_digtable.pre_igvalue = 0x0; + dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; + dm_digtable.presta_connectstate = DIG_STA_DISCONNECT; + dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT; + dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; + dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; + dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; + dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; + dm_digtable.rx_gain_range_max = DM_DIG_MAX; + dm_digtable.rx_gain_range_min = DM_DIG_MIN; + dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; + dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; + dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; + dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX; + dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; +} + +static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + long rssi_val_min = 0; + + if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) && + (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) { + if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0) + rssi_val_min = + (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb > + rtlpriv->dm.undecorated_smoothed_pwdb) ? 
+ rtlpriv->dm.undecorated_smoothed_pwdb : + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + else + rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; + } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT || + dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) { + rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; + } else if (dm_digtable.curmultista_connectstate == + DIG_MULTISTA_CONNECT) { + rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + } + + return (u8) rssi_val_min; +} + +static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) +{ + u32 ret_value; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); + + ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); + falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); + + ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); + falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); + falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); + + ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); + falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); + falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail; + + rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1); + ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); + falsealm_cnt->cnt_cck_fail = ret_value; + + ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); + falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; + falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail + + falsealm_cnt->cnt_rate_illegal + + falsealm_cnt->cnt_crc8_fail + + falsealm_cnt->cnt_mcs_fail + + falsealm_cnt->cnt_cck_fail); + + rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1); + rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0); + rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0); + rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + ("cnt_parity_fail = %d, cnt_rate_illegal = %d, " + "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", + falsealm_cnt->cnt_parity_fail, + falsealm_cnt->cnt_rate_illegal, + falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail)); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n", + falsealm_cnt->cnt_ofdm_fail, + falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all)); +} + +static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value_igi = dm_digtable.cur_igvalue; + + if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) + value_igi--; + else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1) + value_igi += 0; + else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2) + value_igi++; + else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2) + value_igi += 2; + if (value_igi > DM_DIG_FA_UPPER) + value_igi = DM_DIG_FA_UPPER; + else if (value_igi < DM_DIG_FA_LOWER) + value_igi = DM_DIG_FA_LOWER; + if (rtlpriv->falsealm_cnt.cnt_all > 10000) + value_igi = 0x32; + + dm_digtable.cur_igvalue = value_igi; + rtl92c_dm_write_dig(hw); +} + +static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) { + if ((dm_digtable.backoff_val - 2) < + dm_digtable.backoff_val_range_min) + dm_digtable.backoff_val = + dm_digtable.backoff_val_range_min; + 
else + dm_digtable.backoff_val -= 2; + } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) { + if ((dm_digtable.backoff_val + 2) > + dm_digtable.backoff_val_range_max) + dm_digtable.backoff_val = + dm_digtable.backoff_val_range_max; + else + dm_digtable.backoff_val += 2; + } + + if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) > + dm_digtable.rx_gain_range_max) + dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max; + else if ((dm_digtable.rssi_val_min + 10 - + dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min) + dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min; + else + dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 - + dm_digtable.backoff_val; + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + ("rssi_val_min = %x backoff_val %x\n", + dm_digtable.rssi_val_min, dm_digtable.backoff_val)); + + rtl92c_dm_write_dig(hw); +} + +static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) +{ + static u8 binitialized; /* initialized to false */ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + bool multi_sta = false; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) + multi_sta = true; + + if ((multi_sta == false) || (dm_digtable.cursta_connectctate != + DIG_STA_DISCONNECT)) { + binitialized = false; + dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + return; + } else if (binitialized == false) { + binitialized = true; + dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; + dm_digtable.cur_igvalue = 0x20; + rtl92c_dm_write_dig(hw); + } + + if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) { + if ((rssi_strength < dm_digtable.rssi_lowthresh) && + (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) { + + if (dm_digtable.dig_ext_port_stage == + DIG_EXT_PORT_STAGE_2) { + dm_digtable.cur_igvalue = 0x20; + rtl92c_dm_write_dig(hw); + } + + dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1; + } else if (rssi_strength > dm_digtable.rssi_highthresh) { + dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2; + rtl92c_dm_ctrl_initgain_by_fa(hw); + } + } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) { + dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; + dm_digtable.cur_igvalue = 0x20; + rtl92c_dm_write_dig(hw); + } + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + ("curmultista_connectstate = " + "%x dig_ext_port_stage %x\n", + dm_digtable.curmultista_connectstate, + dm_digtable.dig_ext_port_stage)); +} + +static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + ("presta_connectstate = %x," + " cursta_connectctate = %x\n", + dm_digtable.presta_connectstate, + dm_digtable.cursta_connectctate)); + + if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate + || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT + || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { + + if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { + dm_digtable.rssi_val_min = + rtl92c_dm_initial_gain_min_pwdb(hw); + rtl92c_dm_ctrl_initgain_by_rssi(hw); + } + } else { + dm_digtable.rssi_val_min = 0; + dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; + dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; + dm_digtable.cur_igvalue = 0x20; + dm_digtable.pre_igvalue = 0; + rtl92c_dm_write_dig(hw); + } +} + +static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) +{ + 
struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { + dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); + + if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { + if (dm_digtable.rssi_val_min <= 25) + dm_digtable.cur_cck_pd_state = + CCK_PD_STAGE_LowRssi; + else + dm_digtable.cur_cck_pd_state = + CCK_PD_STAGE_HighRssi; + } else { + if (dm_digtable.rssi_val_min <= 20) + dm_digtable.cur_cck_pd_state = + CCK_PD_STAGE_LowRssi; + else + dm_digtable.cur_cck_pd_state = + CCK_PD_STAGE_HighRssi; + } + } else { + dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; + } + + if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) { + if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { + if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800) + dm_digtable.cur_cck_fa_state = + CCK_FA_STAGE_High; + else + dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low; + + if (dm_digtable.pre_cck_fa_state != + dm_digtable.cur_cck_fa_state) { + if (dm_digtable.cur_cck_fa_state == + CCK_FA_STAGE_Low) + rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, + 0x83); + else + rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, + 0xcd); + + dm_digtable.pre_cck_fa_state = + dm_digtable.cur_cck_fa_state; + } + + rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40); + + if (IS_92C_SERIAL(rtlhal->version)) + rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, + MASKBYTE2, 0xd7); + } else { + rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); + rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47); + + if (IS_92C_SERIAL(rtlhal->version)) + rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, + MASKBYTE2, 0xd3); + } + dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state; + } + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state)); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, + ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version))); +} + +static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + if (mac->act_scanning == true) + return; + + if ((mac->link_state > MAC80211_NOLINK) && + (mac->link_state < MAC80211_LINKED)) + dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT; + else if (mac->link_state >= MAC80211_LINKED) + dm_digtable.cursta_connectctate = DIG_STA_CONNECT; + else + dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; + + rtl92c_dm_initial_gain_sta(hw); + rtl92c_dm_initial_gain_multi_sta(hw); + rtl92c_dm_cck_packet_detection_thresh(hw); + + dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate; + +} + +static void rtl92c_dm_dig(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->dm.dm_initialgain_enable == false) + return; + if (dm_digtable.dig_enable_flag == false) + return; + + rtl92c_dm_ctrl_initgain_by_twoport(hw); + +} + +static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.dynamic_txpower_enable = false; + + rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; + rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; +} + +void rtl92c_dm_write_dig(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, + ("cur_igvalue = 0x%x, " + "pre_igvalue = 0x%x, backoff_val = %d\n", + dm_digtable.cur_igvalue, dm_digtable.pre_igvalue, + dm_digtable.backoff_val)); + + if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) { + rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, 
+ dm_digtable.cur_igvalue); + rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, + dm_digtable.cur_igvalue); + + dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; + } +} +EXPORT_SYMBOL(rtl92c_dm_write_dig); + +static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff; + + u8 h2c_parameter[3] = { 0 }; + + return; + + if (tmpentry_max_pwdb != 0) { + rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = + tmpentry_max_pwdb; + } else { + rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0; + } + + if (tmpentry_min_pwdb != 0xff) { + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = + tmpentry_min_pwdb; + } else { + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0; + } + + h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF); + h2c_parameter[0] = 0; + + rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter); +} + +void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + rtlpriv->dm.current_turbo_edca = false; + rtlpriv->dm.is_any_nonbepkts = false; + rtlpriv->dm.is_cur_rdlstate = false; +} +EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo); + +static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + static u64 last_txok_cnt; + static u64 last_rxok_cnt; + u64 cur_txok_cnt; + u64 cur_rxok_cnt; + u32 edca_be_ul = 0x5ea42b; + u32 edca_be_dl = 0x5ea42b; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) + goto dm_checkedcaturbo_exit; + + if (mac->link_state != MAC80211_LINKED) { + rtlpriv->dm.current_turbo_edca = false; + return; + } + + if (!mac->ht_enable) { /*FIX MERGE */ + if (!(edca_be_ul & 0xffff0000)) + edca_be_ul |= 0x005e0000; + + if (!(edca_be_dl & 0xffff0000)) + edca_be_dl |= 0x005e0000; + } + + if ((!rtlpriv->dm.is_any_nonbepkts) && + (!rtlpriv->dm.disable_framebursting)) { + cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; + cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; + if (cur_rxok_cnt > 4 * cur_txok_cnt) { + if (!rtlpriv->dm.is_cur_rdlstate || + !rtlpriv->dm.current_turbo_edca) { + rtl_write_dword(rtlpriv, + REG_EDCA_BE_PARAM, + edca_be_dl); + rtlpriv->dm.is_cur_rdlstate = true; + } + } else { + if (rtlpriv->dm.is_cur_rdlstate || + !rtlpriv->dm.current_turbo_edca) { + rtl_write_dword(rtlpriv, + REG_EDCA_BE_PARAM, + edca_be_ul); + rtlpriv->dm.is_cur_rdlstate = false; + } + } + rtlpriv->dm.current_turbo_edca = true; + } else { + if (rtlpriv->dm.current_turbo_edca) { + u8 tmp = AC0_BE; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, + (u8 *) (&tmp)); + rtlpriv->dm.current_turbo_edca = false; + } + } + +dm_checkedcaturbo_exit: + rtlpriv->dm.is_any_nonbepkts = false; + last_txok_cnt = rtlpriv->stats.txbytesunicast; + last_rxok_cnt = rtlpriv->stats.rxbytesunicast; +} + +static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw + *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 thermalvalue, delta, delta_lck, delta_iqk; + long ele_a, ele_d, temp_cck, val_x, value32; + long val_y, ele_c; + u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old; + int i; + bool is2t = IS_92C_SERIAL(rtlhal->version); + u8 txpwr_level[2] = {0, 0}; + u8 ofdm_min_index = 6, rf; + + rtlpriv->dm.txpower_trackingInit = true; + RT_TRACE(rtlpriv, 
COMP_POWER_TRACKING, DBG_LOUD, + ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n")); + + thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x " + "eeprom_thermalmeter 0x%x\n", + thermalvalue, rtlpriv->dm.thermalvalue, + rtlefuse->eeprom_thermalmeter)); + + rtl92c_phy_ap_calibrate(hw, (thermalvalue - + rtlefuse->eeprom_thermalmeter)); + if (is2t) + rf = 2; + else + rf = 1; + + if (thermalvalue) { + ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD) & MASKOFDM_D; + + for (i = 0; i < OFDM_TABLE_LENGTH; i++) { + if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { + ofdm_index_old[0] = (u8) i; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("Initial pathA ele_d reg0x%x = 0x%lx, " + "ofdm_index=0x%x\n", + ROFDM0_XATXIQIMBALANCE, + ele_d, ofdm_index_old[0])); + break; + } + } + + if (is2t) { + ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, + MASKDWORD) & MASKOFDM_D; + + for (i = 0; i < OFDM_TABLE_LENGTH; i++) { + if (ele_d == (ofdmswing_table[i] & + MASKOFDM_D)) { + ofdm_index_old[1] = (u8) i; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, + DBG_LOUD, + ("Initial pathB ele_d reg0x%x = " + "0x%lx, ofdm_index=0x%x\n", + ROFDM0_XBTXIQIMBALANCE, ele_d, + ofdm_index_old[1])); + break; + } + } + } + + temp_cck = + rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK; + + for (i = 0; i < CCK_TABLE_LENGTH; i++) { + if (rtlpriv->dm.cck_inch14) { + if (memcmp((void *)&temp_cck, + (void *)&cckswing_table_ch14[i][2], + 4) == 0) { + cck_index_old = (u8) i; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, + DBG_LOUD, + ("Initial reg0x%x = 0x%lx, " + "cck_index=0x%x, ch 14 %d\n", + RCCK0_TXFILTER2, temp_cck, + cck_index_old, + rtlpriv->dm.cck_inch14)); + break; + } + } else { + if (memcmp((void *)&temp_cck, + (void *) + &cckswing_table_ch1ch13[i][2], + 4) == 0) { + cck_index_old = (u8) i; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, + DBG_LOUD, + ("Initial reg0x%x = 0x%lx, " + "cck_index=0x%x, ch14 %d\n", + RCCK0_TXFILTER2, temp_cck, + cck_index_old, + rtlpriv->dm.cck_inch14)); + break; + } + } + } + + if (!rtlpriv->dm.thermalvalue) { + rtlpriv->dm.thermalvalue = + rtlefuse->eeprom_thermalmeter; + rtlpriv->dm.thermalvalue_lck = thermalvalue; + rtlpriv->dm.thermalvalue_iqk = thermalvalue; + for (i = 0; i < rf; i++) + rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i]; + rtlpriv->dm.cck_index = cck_index_old; + } + + delta = (thermalvalue > rtlpriv->dm.thermalvalue) ? + (thermalvalue - rtlpriv->dm.thermalvalue) : + (rtlpriv->dm.thermalvalue - thermalvalue); + + delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ? + (thermalvalue - rtlpriv->dm.thermalvalue_lck) : + (rtlpriv->dm.thermalvalue_lck - thermalvalue); + + delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ? 
+ (thermalvalue - rtlpriv->dm.thermalvalue_iqk) : + (rtlpriv->dm.thermalvalue_iqk - thermalvalue); + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x " + "eeprom_thermalmeter 0x%x delta 0x%x " + "delta_lck 0x%x delta_iqk 0x%x\n", + thermalvalue, rtlpriv->dm.thermalvalue, + rtlefuse->eeprom_thermalmeter, delta, delta_lck, + delta_iqk)); + + if (delta_lck > 1) { + rtlpriv->dm.thermalvalue_lck = thermalvalue; + rtl92c_phy_lc_calibrate(hw); + } + + if (delta > 0 && rtlpriv->dm.txpower_track_control) { + if (thermalvalue > rtlpriv->dm.thermalvalue) { + for (i = 0; i < rf; i++) + rtlpriv->dm.ofdm_index[i] -= delta; + rtlpriv->dm.cck_index -= delta; + } else { + for (i = 0; i < rf; i++) + rtlpriv->dm.ofdm_index[i] += delta; + rtlpriv->dm.cck_index += delta; + } + + if (is2t) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("temp OFDM_A_index=0x%x, " + "OFDM_B_index=0x%x," + "cck_index=0x%x\n", + rtlpriv->dm.ofdm_index[0], + rtlpriv->dm.ofdm_index[1], + rtlpriv->dm.cck_index)); + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("temp OFDM_A_index=0x%x," + "cck_index=0x%x\n", + rtlpriv->dm.ofdm_index[0], + rtlpriv->dm.cck_index)); + } + + if (thermalvalue > rtlefuse->eeprom_thermalmeter) { + for (i = 0; i < rf; i++) + ofdm_index[i] = + rtlpriv->dm.ofdm_index[i] + + 1; + cck_index = rtlpriv->dm.cck_index + 1; + } else { + for (i = 0; i < rf; i++) + ofdm_index[i] = + rtlpriv->dm.ofdm_index[i]; + cck_index = rtlpriv->dm.cck_index; + } + + for (i = 0; i < rf; i++) { + if (txpwr_level[i] >= 0 && + txpwr_level[i] <= 26) { + if (thermalvalue > + rtlefuse->eeprom_thermalmeter) { + if (delta < 5) + ofdm_index[i] -= 1; + + else + ofdm_index[i] -= 2; + } else if (delta > 5 && thermalvalue < + rtlefuse-> + eeprom_thermalmeter) { + ofdm_index[i] += 1; + } + } else if (txpwr_level[i] >= 27 && + txpwr_level[i] <= 32 + && thermalvalue > + rtlefuse->eeprom_thermalmeter) { + if (delta < 5) + ofdm_index[i] -= 1; + + else + ofdm_index[i] -= 2; + } else if (txpwr_level[i] >= 32 && + txpwr_level[i] <= 38 && + thermalvalue > + rtlefuse->eeprom_thermalmeter + && delta > 5) { + ofdm_index[i] -= 1; + } + } + + if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) { + if (thermalvalue > + rtlefuse->eeprom_thermalmeter) { + if (delta < 5) + cck_index -= 1; + + else + cck_index -= 2; + } else if (delta > 5 && thermalvalue < + rtlefuse->eeprom_thermalmeter) { + cck_index += 1; + } + } else if (txpwr_level[i] >= 27 && + txpwr_level[i] <= 32 && + thermalvalue > + rtlefuse->eeprom_thermalmeter) { + if (delta < 5) + cck_index -= 1; + + else + cck_index -= 2; + } else if (txpwr_level[i] >= 32 && + txpwr_level[i] <= 38 && + thermalvalue > rtlefuse->eeprom_thermalmeter + && delta > 5) { + cck_index -= 1; + } + + for (i = 0; i < rf; i++) { + if (ofdm_index[i] > OFDM_TABLE_SIZE - 1) + ofdm_index[i] = OFDM_TABLE_SIZE - 1; + + else if (ofdm_index[i] < ofdm_min_index) + ofdm_index[i] = ofdm_min_index; + } + + if (cck_index > CCK_TABLE_SIZE - 1) + cck_index = CCK_TABLE_SIZE - 1; + else if (cck_index < 0) + cck_index = 0; + + if (is2t) { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("new OFDM_A_index=0x%x, " + "OFDM_B_index=0x%x," + "cck_index=0x%x\n", + ofdm_index[0], ofdm_index[1], + cck_index)); + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("new OFDM_A_index=0x%x," + "cck_index=0x%x\n", + ofdm_index[0], cck_index)); + } + } + + if (rtlpriv->dm.txpower_track_control && delta != 0) { + ele_d = + (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) 
>> 22; + val_x = rtlphy->reg_e94; + val_y = rtlphy->reg_e9c; + + if (val_x != 0) { + if ((val_x & 0x00000200) != 0) + val_x = val_x | 0xFFFFFC00; + ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; + + if ((val_y & 0x00000200) != 0) + val_y = val_y | 0xFFFFFC00; + ele_c = ((val_y * ele_d) >> 8) & 0x000003FF; + + value32 = (ele_d << 22) | + ((ele_c & 0x3F) << 16) | ele_a; + + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD, value32); + + value32 = (ele_c & 0x000003C0) >> 6; + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, + value32); + + value32 = ((val_x * ele_d) >> 7) & 0x01; + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, + BIT(31), value32); + + value32 = ((val_y * ele_d) >> 7) & 0x01; + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, + BIT(29), value32); + } else { + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD, + ofdmswing_table[ofdm_index[0]]); + + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, + 0x00); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, + BIT(31) | BIT(29), 0x00); + } + + if (!rtlpriv->dm.cck_inch14) { + rtl_write_byte(rtlpriv, 0xa22, + cckswing_table_ch1ch13[cck_index] + [0]); + rtl_write_byte(rtlpriv, 0xa23, + cckswing_table_ch1ch13[cck_index] + [1]); + rtl_write_byte(rtlpriv, 0xa24, + cckswing_table_ch1ch13[cck_index] + [2]); + rtl_write_byte(rtlpriv, 0xa25, + cckswing_table_ch1ch13[cck_index] + [3]); + rtl_write_byte(rtlpriv, 0xa26, + cckswing_table_ch1ch13[cck_index] + [4]); + rtl_write_byte(rtlpriv, 0xa27, + cckswing_table_ch1ch13[cck_index] + [5]); + rtl_write_byte(rtlpriv, 0xa28, + cckswing_table_ch1ch13[cck_index] + [6]); + rtl_write_byte(rtlpriv, 0xa29, + cckswing_table_ch1ch13[cck_index] + [7]); + } else { + rtl_write_byte(rtlpriv, 0xa22, + cckswing_table_ch14[cck_index] + [0]); + rtl_write_byte(rtlpriv, 0xa23, + cckswing_table_ch14[cck_index] + [1]); + rtl_write_byte(rtlpriv, 0xa24, + cckswing_table_ch14[cck_index] + [2]); + rtl_write_byte(rtlpriv, 0xa25, + cckswing_table_ch14[cck_index] + [3]); + rtl_write_byte(rtlpriv, 0xa26, + cckswing_table_ch14[cck_index] + [4]); + rtl_write_byte(rtlpriv, 0xa27, + cckswing_table_ch14[cck_index] + [5]); + rtl_write_byte(rtlpriv, 0xa28, + cckswing_table_ch14[cck_index] + [6]); + rtl_write_byte(rtlpriv, 0xa29, + cckswing_table_ch14[cck_index] + [7]); + } + + if (is2t) { + ele_d = (ofdmswing_table[ofdm_index[1]] & + 0xFFC00000) >> 22; + + val_x = rtlphy->reg_eb4; + val_y = rtlphy->reg_ebc; + + if (val_x != 0) { + if ((val_x & 0x00000200) != 0) + val_x = val_x | 0xFFFFFC00; + ele_a = ((val_x * ele_d) >> 8) & + 0x000003FF; + + if ((val_y & 0x00000200) != 0) + val_y = val_y | 0xFFFFFC00; + ele_c = ((val_y * ele_d) >> 8) & + 0x00003FF; + + value32 = (ele_d << 22) | + ((ele_c & 0x3F) << 16) | ele_a; + rtl_set_bbreg(hw, + ROFDM0_XBTXIQIMBALANCE, + MASKDWORD, value32); + + value32 = (ele_c & 0x000003C0) >> 6; + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, + MASKH4BITS, value32); + + value32 = ((val_x * ele_d) >> 7) & 0x01; + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, + BIT(27), value32); + + value32 = ((val_y * ele_d) >> 7) & 0x01; + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, + BIT(25), value32); + } else { + rtl_set_bbreg(hw, + ROFDM0_XBTXIQIMBALANCE, + MASKDWORD, + ofdmswing_table[ofdm_index + [1]]); + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, + MASKH4BITS, 0x00); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, + BIT(27) | BIT(25), 0x00); + } + + } + } + + if (delta_iqk > 3) { + rtlpriv->dm.thermalvalue_iqk = thermalvalue; + rtl92c_phy_iq_calibrate(hw, false); + } + + if (rtlpriv->dm.txpower_track_control) + rtlpriv->dm.thermalvalue = thermalvalue; + } + + 
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n")); + +} + +static void rtl92c_dm_initialize_txpower_tracking_thermalmeter( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.txpower_tracking = true; + rtlpriv->dm.txpower_trackingInit = false; + + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("pMgntInfo->txpower_tracking = %d\n", + rtlpriv->dm.txpower_tracking)); +} + +static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) +{ + rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw); +} + +static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw) +{ + rtl92c_dm_txpower_tracking_callback_thermalmeter(hw); +} + +static void rtl92c_dm_check_txpower_tracking_thermal_meter( + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + static u8 tm_trigger; + + if (!rtlpriv->dm.txpower_tracking) + return; + + if (!tm_trigger) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK, + 0x60); + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("Trigger 92S Thermal Meter!!\n")); + tm_trigger = 1; + return; + } else { + RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, + ("Schedule TxPowerTracking direct call!!\n")); + rtl92c_dm_txpower_tracking_directcall(hw); + tm_trigger = 0; + } +} + +void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw) +{ + rtl92c_dm_check_txpower_tracking_thermal_meter(hw); +} +EXPORT_SYMBOL(rtl92c_dm_check_txpower_tracking); + +void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rate_adaptive *p_ra = &(rtlpriv->ra); + + p_ra->ratr_state = DM_RATR_STA_INIT; + p_ra->pre_ratr_state = DM_RATR_STA_INIT; + + if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) + rtlpriv->dm.useramask = true; + else + rtlpriv->dm.useramask = false; + +} +EXPORT_SYMBOL(rtl92c_dm_init_rate_adaptive_mask); + +static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rate_adaptive *p_ra = &(rtlpriv->ra); + u32 low_rssithresh_for_ra, high_rssithresh_for_ra; + + if (is_hal_stop(rtlhal)) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + ("<---- driver is going to unload\n")); + return; + } + + if (!rtlpriv->dm.useramask) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + ("<---- driver does not control rate adaptive mask\n")); + return; + } + + if (mac->link_state == MAC80211_LINKED) { + + switch (p_ra->pre_ratr_state) { + case DM_RATR_STA_HIGH: + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 20; + break; + case DM_RATR_STA_MIDDLE: + high_rssithresh_for_ra = 55; + low_rssithresh_for_ra = 20; + break; + case DM_RATR_STA_LOW: + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 25; + break; + default: + high_rssithresh_for_ra = 50; + low_rssithresh_for_ra = 20; + break; + } + + if (rtlpriv->dm.undecorated_smoothed_pwdb > + (long)high_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_HIGH; + else if (rtlpriv->dm.undecorated_smoothed_pwdb > + (long)low_rssithresh_for_ra) + p_ra->ratr_state = DM_RATR_STA_MIDDLE; + else + p_ra->ratr_state = DM_RATR_STA_LOW; + + if (p_ra->pre_ratr_state != p_ra->ratr_state) { + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + ("RSSI = %ld\n", + rtlpriv->dm.undecorated_smoothed_pwdb)); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + ("RSSI_LEVEL = %d\n", p_ra->ratr_state)); + RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, + ("PreState = %d, CurState = 
%d\n", + p_ra->pre_ratr_state, p_ra->ratr_state)); + + rtlpriv->cfg->ops->update_rate_mask(hw, + p_ra->ratr_state); + + p_ra->pre_ratr_state = p_ra->ratr_state; + } + } +} + +static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) +{ + dm_pstable.pre_ccastate = CCA_MAX; + dm_pstable.cur_ccasate = CCA_MAX; + dm_pstable.pre_rfstate = RF_MAX; + dm_pstable.cur_rfstate = RF_MAX; + dm_pstable.rssi_val_min = 0; +} + +static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (dm_pstable.rssi_val_min != 0) { + if (dm_pstable.pre_ccastate == CCA_2R) { + if (dm_pstable.rssi_val_min >= 35) + dm_pstable.cur_ccasate = CCA_1R; + else + dm_pstable.cur_ccasate = CCA_2R; + } else { + if (dm_pstable.rssi_val_min <= 30) + dm_pstable.cur_ccasate = CCA_2R; + else + dm_pstable.cur_ccasate = CCA_1R; + } + } else { + dm_pstable.cur_ccasate = CCA_MAX; + } + + if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) { + if (dm_pstable.cur_ccasate == CCA_1R) { + if (get_rf_type(rtlphy) == RF_2T2R) { + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, + MASKBYTE0, 0x13); + rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20); + } else { + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, + MASKBYTE0, 0x23); + rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c); + } + } else { + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, + 0x33); + rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63); + } + dm_pstable.pre_ccastate = dm_pstable.cur_ccasate; + } + + RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n", + (dm_pstable.cur_ccasate == + 0) ? "1RCCA" : "2RCCA")); +} + +void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) +{ + static u8 initialize; + static u32 reg_874, reg_c70, reg_85c, reg_a74; + + if (initialize == 0) { + reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, + MASKDWORD) & 0x1CC000) >> 14; + + reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1, + MASKDWORD) & BIT(3)) >> 3; + + reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, + MASKDWORD) & 0xFF000000) >> 24; + + reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12; + + initialize = 1; + } + + if (!bforce_in_normal) { + if (dm_pstable.rssi_val_min != 0) { + if (dm_pstable.pre_rfstate == RF_NORMAL) { + if (dm_pstable.rssi_val_min >= 30) + dm_pstable.cur_rfstate = RF_SAVE; + else + dm_pstable.cur_rfstate = RF_NORMAL; + } else { + if (dm_pstable.rssi_val_min <= 25) + dm_pstable.cur_rfstate = RF_NORMAL; + else + dm_pstable.cur_rfstate = RF_SAVE; + } + } else { + dm_pstable.cur_rfstate = RF_MAX; + } + } else { + dm_pstable.cur_rfstate = RF_NORMAL; + } + + if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) { + if (dm_pstable.cur_rfstate == RF_SAVE) { + rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, + 0x1C0000, 0x2); + rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0); + rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, + 0xFF000000, 0x63); + rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, + 0xC000, 0x2); + rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3); + rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); + rtl_set_bbreg(hw, 0x818, BIT(28), 0x1); + } else { + rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, + 0x1CC000, reg_874); + rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), + reg_c70); + rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000, + reg_85c); + rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74); + rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); + } + + dm_pstable.pre_rfstate = dm_pstable.cur_rfstate; + } +} +EXPORT_SYMBOL(rtl92c_dm_rf_saving); + +static void 
rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (((mac->link_state == MAC80211_NOLINK)) && + (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { + dm_pstable.rssi_val_min = 0; + RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, + ("Not connected to any\n")); + } + + if (mac->link_state == MAC80211_LINKED) { + if (mac->opmode == NL80211_IFTYPE_ADHOC) { + dm_pstable.rssi_val_min = + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, + ("AP Client PWDB = 0x%lx\n", + dm_pstable.rssi_val_min)); + } else { + dm_pstable.rssi_val_min = + rtlpriv->dm.undecorated_smoothed_pwdb; + RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, + ("STA Default Port PWDB = 0x%lx\n", + dm_pstable.rssi_val_min)); + } + } else { + dm_pstable.rssi_val_min = + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + + RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, + ("AP Ext Port PWDB = 0x%lx\n", + dm_pstable.rssi_val_min)); + } + + if (IS_92C_SERIAL(rtlhal->version)) + rtl92c_dm_1r_cca(hw); +} + +void rtl92c_dm_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + rtl92c_dm_diginit(hw); + rtl92c_dm_init_dynamic_txpower(hw); + rtl92c_dm_init_edca_turbo(hw); + rtl92c_dm_init_rate_adaptive_mask(hw); + rtl92c_dm_initialize_txpower_tracking(hw); + rtl92c_dm_init_dynamic_bb_powersaving(hw); +} +EXPORT_SYMBOL(rtl92c_dm_init); + +void rtl92c_dm_watchdog(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool fw_current_inpsmode = false; + bool fw_ps_awake = true; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, + (u8 *) (&fw_current_inpsmode)); + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, + (u8 *) (&fw_ps_awake)); + + if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && + fw_ps_awake) + && (!ppsc->rfchange_inprogress)) { + rtl92c_dm_pwdb_monitor(hw); + rtl92c_dm_dig(hw); + rtl92c_dm_false_alarm_counter_statistics(hw); + rtl92c_dm_dynamic_bb_powersaving(hw); + rtlpriv->cfg->ops->dm_dynamic_txpower(hw); + rtl92c_dm_check_txpower_tracking(hw); + rtl92c_dm_refresh_rate_adaptive_mask(hw); + rtl92c_dm_check_edca_turbo(hw); + + } +} +EXPORT_SYMBOL(rtl92c_dm_watchdog); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h new file mode 100644 index 000000000000..b9cbb0a3c03f --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h @@ -0,0 +1,204 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#ifndef __RTL92COMMON_DM_H__ +#define __RTL92COMMON_DM_H__ + +#include "../wifi.h" +#include "../rtl8192ce/def.h" +#include "../rtl8192ce/reg.h" +#include "fw_common.h" + +#define HAL_DM_DIG_DISABLE BIT(0) +#define HAL_DM_HIPWR_DISABLE BIT(1) + +#define OFDM_TABLE_LENGTH 37 +#define CCK_TABLE_LENGTH 33 + +#define OFDM_TABLE_SIZE 37 +#define CCK_TABLE_SIZE 33 + +#define BW_AUTO_SWITCH_HIGH_LOW 25 +#define BW_AUTO_SWITCH_LOW_HIGH 30 + +#define DM_DIG_THRESH_HIGH 40 +#define DM_DIG_THRESH_LOW 35 + +#define DM_FALSEALARM_THRESH_LOW 400 +#define DM_FALSEALARM_THRESH_HIGH 1000 + +#define DM_DIG_MAX 0x3e +#define DM_DIG_MIN 0x1e + +#define DM_DIG_FA_UPPER 0x32 +#define DM_DIG_FA_LOWER 0x20 +#define DM_DIG_FA_TH0 0x20 +#define DM_DIG_FA_TH1 0x100 +#define DM_DIG_FA_TH2 0x200 + +#define DM_DIG_BACKOFF_MAX 12 +#define DM_DIG_BACKOFF_MIN -4 +#define DM_DIG_BACKOFF_DEFAULT 10 + +#define RXPATHSELECTION_SS_TH_lOW 30 +#define RXPATHSELECTION_DIFF_TH 18 + +#define DM_RATR_STA_INIT 0 +#define DM_RATR_STA_HIGH 1 +#define DM_RATR_STA_MIDDLE 2 +#define DM_RATR_STA_LOW 3 + +#define CTS2SELF_THVAL 30 +#define REGC38_TH 20 + +#define WAIOTTHVal 25 + +#define TXHIGHPWRLEVEL_NORMAL 0 +#define TXHIGHPWRLEVEL_LEVEL1 1 +#define TXHIGHPWRLEVEL_LEVEL2 2 +#define TXHIGHPWRLEVEL_BT1 3 +#define TXHIGHPWRLEVEL_BT2 4 + +#define DM_TYPE_BYFW 0 +#define DM_TYPE_BYDRIVER 1 + +#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 +#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 + +struct ps_t { + u8 pre_ccastate; + u8 cur_ccasate; + u8 pre_rfstate; + u8 cur_rfstate; + long rssi_val_min; +}; + +struct dig_t { + u8 dig_enable_flag; + u8 dig_ext_port_stage; + u32 rssi_lowthresh; + u32 rssi_highthresh; + u32 fa_lowthresh; + u32 fa_highthresh; + u8 cursta_connectctate; + u8 presta_connectstate; + u8 curmultista_connectstate; + u8 pre_igvalue; + u8 cur_igvalue; + char backoff_val; + char backoff_val_range_max; + char backoff_val_range_min; + u8 rx_gain_range_max; + u8 rx_gain_range_min; + u8 rssi_val_min; + u8 pre_cck_pd_state; + u8 cur_cck_pd_state; + u8 pre_cck_fa_state; + u8 cur_cck_fa_state; + u8 pre_ccastate; + u8 cur_ccasate; +}; + +struct swat_t { + u8 failure_cnt; + u8 try_flag; + u8 stop_trying; + long pre_rssi; + long trying_threshold; + u8 cur_antenna; + u8 pre_antenna; +}; + +enum tag_dynamic_init_gain_operation_type_definition { + DIG_TYPE_THRESH_HIGH = 0, + DIG_TYPE_THRESH_LOW = 1, + DIG_TYPE_BACKOFF = 2, + DIG_TYPE_RX_GAIN_MIN = 3, + DIG_TYPE_RX_GAIN_MAX = 4, + DIG_TYPE_ENABLE = 5, + DIG_TYPE_DISABLE = 6, + DIG_OP_TYPE_MAX +}; + +enum tag_cck_packet_detection_threshold_type_definition { + CCK_PD_STAGE_LowRssi = 0, + CCK_PD_STAGE_HighRssi = 1, + CCK_FA_STAGE_Low = 2, + CCK_FA_STAGE_High = 3, + CCK_PD_STAGE_MAX = 4, +}; + +enum dm_1r_cca_e { + CCA_1R = 0, + CCA_2R = 1, + CCA_MAX = 2, +}; + +enum dm_rf_e { + RF_SAVE = 0, + RF_NORMAL = 1, + RF_MAX = 2, +}; + +enum dm_sw_ant_switch_e { + ANS_ANTENNA_B = 1, + ANS_ANTENNA_A = 2, 
+ ANS_ANTENNA_MAX = 3, +}; + +enum dm_dig_ext_port_alg_e { + DIG_EXT_PORT_STAGE_0 = 0, + DIG_EXT_PORT_STAGE_1 = 1, + DIG_EXT_PORT_STAGE_2 = 2, + DIG_EXT_PORT_STAGE_3 = 3, + DIG_EXT_PORT_STAGE_MAX = 4, +}; + +enum dm_dig_connect_e { + DIG_STA_DISCONNECT = 0, + DIG_STA_CONNECT = 1, + DIG_STA_BEFORE_CONNECT = 2, + DIG_MULTISTA_DISCONNECT = 3, + DIG_MULTISTA_CONNECT = 4, + DIG_CONNECT_MAX +}; + +extern struct dig_t dm_digtable; +void rtl92c_dm_init(struct ieee80211_hw *hw); +void rtl92c_dm_watchdog(struct ieee80211_hw *hw); +void rtl92c_dm_write_dig(struct ieee80211_hw *hw); +void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw); +void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw); +void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); +void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); +void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); +void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index 11dd22b987e7..5ef91374b230 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -31,10 +31,9 @@ #include "../wifi.h" #include "../pci.h" #include "../base.h" -#include "reg.h" -#include "def.h" -#include "fw.h" -#include "table.h" +#include "../rtl8192ce/reg.h" +#include "../rtl8192ce/def.h" +#include "fw_common.h" static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) { @@ -133,17 +132,15 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - bool is_version_b; u8 *bufferPtr = (u8 *) buffer; RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size)); - is_version_b = IS_CHIP_VER_B(version); - if (is_version_b) { + if (IS_CHIP_VER_B(version)) { u32 pageNums, remainSize; u32 page, offset; - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) + if (IS_HARDWARE_TYPE_8192CE(rtlhal)) _rtl92c_fill_dummy(bufferPtr, &size); pageNums = size / FW_8192C_PAGE_SIZE; @@ -231,14 +228,14 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) u32 fwsize; int err; enum version_8192c version = rtlhal->version; + const struct firmware *firmware; - const struct firmware *firmware = NULL; - + printk(KERN_INFO "rtl8192cu: Loading firmware file %s\n", + rtlpriv->cfg->fw_name); err = request_firmware(&firmware, rtlpriv->cfg->fw_name, rtlpriv->io.dev); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - ("Failed to request firmware!\n")); + printk(KERN_ERR "rtl8192cu: Firmware loading failed\n"); return 1; } @@ -281,6 +278,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw) return 0; } +EXPORT_SYMBOL(rtl92c_download_fw); static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum) { @@ -318,12 +316,12 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, while (true) { spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); - if (rtlhal->b_h2c_setinprogress) { + if (rtlhal->h2c_setinprogress) { RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("H2C set in progress! Wait to set.." 
"element_id(%d).\n", element_id)); - while (rtlhal->b_h2c_setinprogress) { + while (rtlhal->h2c_setinprogress) { spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); h2c_waitcounter++; @@ -339,7 +337,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, } spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); } else { - rtlhal->b_h2c_setinprogress = true; + rtlhal->h2c_setinprogress = true; spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); break; } @@ -495,7 +493,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, } spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag); - rtlhal->b_h2c_setinprogress = false; + rtlhal->h2c_setinprogress = false; spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag); RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n")); @@ -507,7 +505,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); u32 tmp_cmdbuf[2]; - if (rtlhal->bfw_ready == false) { + if (rtlhal->fw_ready == false) { RT_ASSERT(false, ("return H2C cmd because of Fw " "download fail!!!\n")); return; @@ -519,6 +517,7 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, return; } +EXPORT_SYMBOL(rtl92c_fill_h2c_cmd); void rtl92c_firmware_selfreset(struct ieee80211_hw *hw) { @@ -539,6 +538,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw) u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); } } +EXPORT_SYMBOL(rtl92c_firmware_selfreset); void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) { @@ -559,39 +559,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode) rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode); } - -static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, - struct sk_buff *skb) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - struct rtl8192_tx_ring *ring; - struct rtl_tx_desc *pdesc; - u8 own; - unsigned long flags; - struct sk_buff *pskb = NULL; - - ring = &rtlpci->tx_ring[BEACON_QUEUE]; - - pskb = __skb_dequeue(&ring->queue); - if (pskb) - kfree_skb(pskb); - - spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); - - pdesc = &ring->desc[0]; - own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN); - - rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); - - __skb_queue_tail(&ring->queue, skb); - - spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); - - rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); - - return true; -} +EXPORT_SYMBOL(rtl92c_set_fw_pwrmode_cmd); #define BEACON_PG 0 /*->1*/ #define PSPOLL_PG 2 @@ -776,7 +744,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet, totalpacketlen); - rtstatus = _rtl92c_cmd_send_packet(hw, skb); + rtstatus = rtlpriv->cfg->ops->cmd_send_packet(hw, skb); if (rtstatus) b_dlok = true; @@ -793,6 +761,7 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set RSVD page location to Fw FAIL!!!!!!.\n")); } +EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt); void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) { @@ -802,3 +771,4 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus) rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm); } +EXPORT_SYMBOL(rtl92c_set_fw_joinbss_report_cmd); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h index 
3db33bd14666..3db33bd14666 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/main.c b/drivers/net/wireless/rtlwifi/rtl8192c/main.c new file mode 100644 index 000000000000..2f624fc27499 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192c/main.c @@ -0,0 +1,39 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../wifi.h" + + +MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); +MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); +MODULE_AUTHOR("Georgia <georgia@realtek.com>"); +MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>"); +MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless"); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c new file mode 100644 index 000000000000..a70228278398 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c @@ -0,0 +1,2042 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../rtl8192ce/reg.h" +#include "../rtl8192ce/def.h" +#include "dm_common.h" +#include "phy_common.h" + +/* Define macro to shorten lines */ +#define MCS_TXPWR mcs_txpwrlevel_origoffset + +u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 returnvalue, originalvalue, bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), " + "bitmask(%#x)\n", regaddr, + bitmask)); + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + returnvalue = (originalvalue & bitmask) >> bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x " + "Addr[0x%x]=0x%x\n", bitmask, + regaddr, originalvalue)); + + return returnvalue; + +} +EXPORT_SYMBOL(rtl92c_phy_query_bb_reg); + +void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 originalvalue, bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)," + " data(%#x)\n", regaddr, bitmask, + data)); + + if (bitmask != MASKDWORD) { + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + data = ((originalvalue & (~bitmask)) | (data << bitshift)); + } + + rtl_write_dword(rtlpriv, regaddr, data); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)," + " data(%#x)\n", regaddr, bitmask, + data)); +} +EXPORT_SYMBOL(rtl92c_phy_set_bb_reg); + +u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset) +{ + RT_ASSERT(false, ("deprecated!\n")); + return 0; +} +EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read); + +void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data) +{ + RT_ASSERT(false, ("deprecated!\n")); +} +EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write); + +u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + u32 newoffset; + u32 tmplong, tmplong2; + u8 rfpi_enable = 0; + u32 retvalue; + + offset &= 0x3f; + newoffset = offset; + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n")); + return 0xFFFFFFFF; + } + tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); + if (rfpath == RF90_PATH_A) + tmplong2 = tmplong; + else + tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); + tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | + (newoffset << 23) | BLSSIREADEDGE; + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong & (~BLSSIREADEDGE)); + mdelay(1); + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); + mdelay(1); + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, + tmplong | BLSSIREADEDGE); + mdelay(1); + if (rfpath == RF90_PATH_A) + rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); + else if (rfpath == RF90_PATH_B) + rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, + BIT(8)); + if (rfpi_enable) + retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi, + BLSSIREADBACKDATA); + else + retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback, + BLSSIREADBACKDATA); + RT_TRACE(rtlpriv, 
COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rflssi_readback, + retvalue)); + return retvalue; +} +EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read); + +void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data) +{ + u32 data_and_addr; + u32 newoffset; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; + + if (RT_CANNOT_IO(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n")); + return; + } + offset &= 0x3f; + newoffset = offset; + data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; + rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n", + rfpath, pphyreg->rf3wire_offset, + data_and_addr)); +} +EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write); + +u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask) +{ + u32 i; + + for (i = 0; i <= 31; i++) { + if (((bitmask >> i) & 0x1) == 1) + break; + } + return i; +} +EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift); + +static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw) +{ + rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2); + rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022); + rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45); + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23); + rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1); + rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2); + rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2); + rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2); + rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2); + rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2); +} +bool rtl92c_phy_rf_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + return rtlpriv->cfg->ops->phy_rf6052_config(hw); +} +EXPORT_SYMBOL(rtl92c_phy_rf_config); + +bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + bool rtstatus; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n")); + rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, + BASEBAND_CONFIG_PHY_REG); + if (rtstatus != true) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!")); + return false; + } + if (rtlphy->rf_type == RF_1T2R) { + _rtl92c_phy_bb_config_1t(hw); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n")); + } + if (rtlefuse->autoload_failflag == false) { + rtlphy->pwrgroup_cnt = 0; + rtstatus = rtlpriv->cfg->ops->config_bb_with_pgheaderfile(hw, + BASEBAND_CONFIG_PHY_REG); + } + if (rtstatus != true) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!")); + return false; + } + rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, + BASEBAND_CONFIG_AGC_TAB); + if (rtstatus != true) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n")); + return false; + } + rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, + 0x200)); + return true; +} +EXPORT_SYMBOL(_rtl92c_phy_bb8192c_config_parafile); + +void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, + u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (regaddr == RTXAGC_A_RATE18_06) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n", + 
rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][0])); + } + if (regaddr == RTXAGC_A_RATE54_24) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][1])); + } + if (regaddr == RTXAGC_A_CCK1_MCS32) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][6])); + } + if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][7])); + } + if (regaddr == RTXAGC_A_MCS03_MCS00) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][2])); + } + if (regaddr == RTXAGC_A_MCS07_MCS04) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][3])); + } + if (regaddr == RTXAGC_A_MCS11_MCS08) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][4])); + } + if (regaddr == RTXAGC_A_MCS15_MCS12) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][5])); + } + if (regaddr == RTXAGC_B_RATE18_06) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8] = data; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][8])); + } + if (regaddr == RTXAGC_B_RATE54_24) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9] = data; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][9])); + } + + if (regaddr == RTXAGC_B_CCK1_55_MCS32) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14] = data; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][14])); + } + + if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15] = data; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][15])); + } + + if (regaddr == RTXAGC_B_MCS03_MCS00) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10] = data; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][10])); + } + + if (regaddr == RTXAGC_B_MCS07_MCS04) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11] = data; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n", + rtlphy->pwrgroup_cnt, + 
rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][11])); + } + + if (regaddr == RTXAGC_B_MCS11_MCS08) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12] = data; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][12])); + } + + if (regaddr == RTXAGC_B_MCS15_MCS12) { + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13] = data; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n", + rtlphy->pwrgroup_cnt, + rtlphy->MCS_TXPWR[rtlphy->pwrgroup_cnt][13])); + + rtlphy->pwrgroup_cnt++; + } +} +EXPORT_SYMBOL(_rtl92c_store_pwrIndex_diffrate_offset); + +void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + rtlphy->default_initialgain[0] = + (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[1] = + (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[2] = + (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); + rtlphy->default_initialgain[3] = + (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Default initial gain (c50=0x%x, " + "c58=0x%x, c60=0x%x, c68=0x%x\n", + rtlphy->default_initialgain[0], + rtlphy->default_initialgain[1], + rtlphy->default_initialgain[2], + rtlphy->default_initialgain[3])); + + rtlphy->framesync = (u8) rtl_get_bbreg(hw, + ROFDM0_RXDETECTOR3, MASKBYTE0); + rtlphy->framesync_c34 = rtl_get_bbreg(hw, + ROFDM0_RXDETECTOR2, MASKDWORD); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Default framesync (0x%x) = 0x%x\n", + ROFDM0_RXDETECTOR3, rtlphy->framesync)); +} + +void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW; + rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB; + rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; + rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; + + rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = + RFPGA0_XA_LSSIPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = + RFPGA0_XB_LSSIPARAMETER; + + rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER; + rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER; + + rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE; + rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE; + + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = 
RFPGA0_XA_HSSIPARAMETER1; + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1; + + rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; + rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; + + rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control = + RFPGA0_XAB_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control = + RFPGA0_XAB_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control = + RFPGA0_XCD_SWITCHCONTROL; + rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control = + RFPGA0_XCD_SWITCHCONTROL; + + rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1; + rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1; + + rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; + rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; + + rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance = + ROFDM0_XARXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance = + ROFDM0_XBRXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance = + ROFDM0_XCRXIQIMBANLANCE; + rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance = + ROFDM0_XDRXIQIMBALANCE; + + rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE; + rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; + rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; + + rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance = + ROFDM0_XATXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance = + ROFDM0_XBTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance = + ROFDM0_XCTXIQIMBALANCE; + rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance = + ROFDM0_XDTXIQIMBALANCE; + + rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; + rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; + rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE; + rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; + + rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = + RFPGA0_XA_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = + RFPGA0_XB_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback = + RFPGA0_XC_LSSIREADBACK; + rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback = + RFPGA0_XD_LSSIREADBACK; + + rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = + TRANSCEIVEA_HSPI_READBACK; + rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi = + TRANSCEIVEB_HSPI_READBACK; + +} +EXPORT_SYMBOL(_rtl92c_phy_init_bb_rf_register_definition); + +void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 txpwr_level; + long txpwr_dbm; + + txpwr_level = rtlphy->cur_cck_txpwridx; + txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_B, txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx + + rtlefuse->legacy_ht_txpowerdiff; + if (_rtl92c_phy_txpwr_idx_to_dbm(hw, + WIRELESS_MODE_G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, + txpwr_level); + txpwr_level = rtlphy->cur_ofdm24g_txpwridx; + if (_rtl92c_phy_txpwr_idx_to_dbm(hw, + 
WIRELESS_MODE_N_24G, + txpwr_level) > txpwr_dbm) + txpwr_dbm = + _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, + txpwr_level); + *powerlevel = txpwr_dbm; +} + +static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel, + u8 *cckpowerlevel, u8 *ofdmpowerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 index = (channel - 1); + + cckpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_cck[RF90_PATH_A][index]; + cckpowerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_cck[RF90_PATH_B][index]; + if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) { + ofdmpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index]; + ofdmpowerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index]; + } else if (get_rf_type(rtlphy) == RF_2T2R) { + ofdmpowerlevel[RF90_PATH_A] = + rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index]; + ofdmpowerlevel[RF90_PATH_B] = + rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index]; + } +} + +static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw, + u8 channel, u8 *cckpowerlevel, + u8 *ofdmpowerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + rtlphy->cur_cck_txpwridx = cckpowerlevel[0]; + rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0]; +} + +void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + u8 cckpowerlevel[2], ofdmpowerlevel[2]; + + if (rtlefuse->txpwr_fromeprom == false) + return; + _rtl92c_get_txpower_index(hw, channel, + &cckpowerlevel[0], &ofdmpowerlevel[0]); + _rtl92c_ccxpower_index_check(hw, + channel, &cckpowerlevel[0], + &ofdmpowerlevel[0]); + rtlpriv->cfg->ops->phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); + rtlpriv->cfg->ops->phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], + channel); +} +EXPORT_SYMBOL(rtl92c_phy_set_txpower_level); + +bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 idx; + u8 rf_path; + + u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw, + WIRELESS_MODE_B, + power_indbm); + u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw, + WIRELESS_MODE_N_24G, + power_indbm); + if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0) + ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff; + else + ofdmtxpwridx = 0; + RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE, + ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n", + power_indbm, ccktxpwridx, ofdmtxpwridx)); + for (idx = 0; idx < 14; idx++) { + for (rf_path = 0; rf_path < 2; rf_path++) { + rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx; + rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] = + ofdmtxpwridx; + rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] = + ofdmtxpwridx; + } + } + rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); + return true; +} +EXPORT_SYMBOL(rtl92c_phy_update_txpower_dbm); + +void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval) +{ +} +EXPORT_SYMBOL(rtl92c_phy_set_beacon_hw_reg); + +u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + long power_indbm) +{ + u8 txpwridx; + long offset; + + switch (wirelessmode) { + case WIRELESS_MODE_B: + offset = -7; + break; + case WIRELESS_MODE_G: + case 
WIRELESS_MODE_N_24G: + offset = -8; + break; + default: + offset = -8; + break; + } + + if ((power_indbm - offset) > 0) + txpwridx = (u8) ((power_indbm - offset) * 2); + else + txpwridx = 0; + + if (txpwridx > MAX_TXPWR_IDX_NMODE_92S) + txpwridx = MAX_TXPWR_IDX_NMODE_92S; + + return txpwridx; +} +EXPORT_SYMBOL(_rtl92c_phy_dbm_to_txpwr_Idx); + +long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx) +{ + long offset; + long pwrout_dbm; + + switch (wirelessmode) { + case WIRELESS_MODE_B: + offset = -7; + break; + case WIRELESS_MODE_G: + case WIRELESS_MODE_N_24G: + offset = -8; + break; + default: + offset = -8; + break; + } + pwrout_dbm = txpwridx / 2 + offset; + return pwrout_dbm; +} +EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm); + +void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum io_type iotype; + + if (!is_hal_stop(rtlhal)) { + switch (operation) { + case SCAN_OPT_BACKUP: + iotype = IO_CMD_PAUSE_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *)&iotype); + + break; + case SCAN_OPT_RESTORE: + iotype = IO_CMD_RESUME_DM_BY_SCAN; + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_IO_CMD, + (u8 *)&iotype); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Unknown Scan Backup operation.\n")); + break; + } + } +} +EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup); + +void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_bw = rtlphy->current_chan_bw; + + if (rtlphy->set_bwmode_inprogress) + return; + rtlphy->set_bwmode_inprogress = true; + if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) + rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw); + else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + ("FALSE driver sleep or unload\n")); + rtlphy->set_bwmode_inprogress = false; + rtlphy->current_chan_bw = tmp_bw; + } +} +EXPORT_SYMBOL(rtl92c_phy_set_bw_mode); + +void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 delay; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + ("switch to channel%d\n", rtlphy->current_channel)); + if (is_hal_stop(rtlhal)) + return; + do { + if (!rtlphy->sw_chnl_inprogress) + break; + if (!_rtl92c_phy_sw_chnl_step_by_step + (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage, + &rtlphy->sw_chnl_step, &delay)) { + if (delay > 0) + mdelay(delay); + else + continue; + } else + rtlphy->sw_chnl_inprogress = false; + break; + } while (true); + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); +} +EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback); + +u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (rtlphy->sw_chnl_inprogress) + return 0; + if (rtlphy->set_bwmode_inprogress) + return 0; + RT_ASSERT((rtlphy->current_channel <= 14), + ("WIRELESS_MODE_G but channel>14")); + rtlphy->sw_chnl_inprogress = true; + rtlphy->sw_chnl_stage = 0; + rtlphy->sw_chnl_step = 0; + if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { + rtl92c_phy_sw_chnl_callback(hw); + RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, + ("sw_chnl_inprogress 
false schdule workitem\n")); + rtlphy->sw_chnl_inprogress = false; + } else { + RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, + ("sw_chnl_inprogress false driver sleep or" + " unload\n")); + rtlphy->sw_chnl_inprogress = false; + } + return 1; +} +EXPORT_SYMBOL(rtl92c_phy_sw_chnl); + +static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, u8 *step, + u32 *delay) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct swchnlcmd precommoncmd[MAX_PRECMD_CNT]; + u32 precommoncmdcnt; + struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT]; + u32 postcommoncmdcnt; + struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT]; + u32 rfdependcmdcnt; + struct swchnlcmd *currentcmd = NULL; + u8 rfpath; + u8 num_total_rfpath = rtlphy->num_total_rfpath; + + precommoncmdcnt = 0; + _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, + CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0); + _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, + MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); + + postcommoncmdcnt = 0; + + _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, + MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); + + rfdependcmdcnt = 0; + + RT_ASSERT((channel >= 1 && channel <= 14), + ("illegal channel for Zebra: %d\n", channel)); + + _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, + RF_CHNLBW, channel, 10); + + _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, + MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, + 0); + + do { + switch (*stage) { + case 0: + currentcmd = &precommoncmd[*step]; + break; + case 1: + currentcmd = &rfdependcmd[*step]; + break; + case 2: + currentcmd = &postcommoncmd[*step]; + break; + } + + if (currentcmd->cmdid == CMDID_END) { + if ((*stage) == 2) { + return true; + } else { + (*stage)++; + (*step) = 0; + continue; + } + } + + switch (currentcmd->cmdid) { + case CMDID_SET_TXPOWEROWER_LEVEL: + rtl92c_phy_set_txpower_level(hw, channel); + break; + case CMDID_WRITEPORT_ULONG: + rtl_write_dword(rtlpriv, currentcmd->para1, + currentcmd->para2); + break; + case CMDID_WRITEPORT_USHORT: + rtl_write_word(rtlpriv, currentcmd->para1, + (u16) currentcmd->para2); + break; + case CMDID_WRITEPORT_UCHAR: + rtl_write_byte(rtlpriv, currentcmd->para1, + (u8) currentcmd->para2); + break; + case CMDID_RF_WRITEREG: + for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { + rtlphy->rfreg_chnlval[rfpath] = + ((rtlphy->rfreg_chnlval[rfpath] & + 0xfffffc00) | currentcmd->para2); + + rtl_set_rfreg(hw, (enum radio_path)rfpath, + currentcmd->para1, + RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[rfpath]); + } + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + } + + break; + } while (true); + + (*delay) = currentcmd->msdelay; + (*step)++; + return false; +} + +static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, + u32 cmdtableidx, u32 cmdtablesz, + enum swchnlcmd_id cmdid, + u32 para1, u32 para2, u32 msdelay) +{ + struct swchnlcmd *pcmd; + + if (cmdtable == NULL) { + RT_ASSERT(false, ("cmdtable cannot be NULL.\n")); + return false; + } + + if (cmdtableidx >= cmdtablesz) + return false; + + pcmd = cmdtable + cmdtableidx; + pcmd->cmdid = cmdid; + pcmd->para1 = para1; + pcmd->para2 = para2; + pcmd->msdelay = msdelay; + return true; +} + +bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath) +{ + return true; +} +EXPORT_SYMBOL(rtl8192_phy_check_is_legal_rfpath); + 
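+/*
+ * IQK helpers below: _rtl92c_phy_path_a_iqk() reports its outcome in the
+ * low two bits of the return value -- bit 0 is set when the TX IQK result
+ * registers (0xe94/0xe9c, gated by 0xeac BIT(28)) look sane, and bit 1 when
+ * the RX IQK registers (0xea4, 0xeac BIT(27)) do. _rtl92c_phy_path_b_iqk()
+ * follows the same convention using 0xeb4/0xebc and 0xec4/0xecc. Callers
+ * therefore treat a return value of 0x03 as a fully successful pass.
+ */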
+static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) +{ + u32 reg_eac, reg_e94, reg_e9c, reg_ea4; + u8 result = 0x00; + + rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f); + rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f); + rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102); + rtl_set_bbreg(hw, 0xe3c, MASKDWORD, + config_pathb ? 0x28160202 : 0x28160502); + + if (config_pathb) { + rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22); + rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22); + rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102); + rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202); + } + + rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1); + rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000); + + mdelay(IQK_DELAY_TIME); + + reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); + reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD); + reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD); + reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD); + + if (!(reg_eac & BIT(28)) && + (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && + (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else + return result; + + if (!(reg_eac & BIT(27)) && + (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_eac & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + return result; +} + +static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw) +{ + u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc; + u8 result = 0x00; + + rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002); + rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000); + mdelay(IQK_DELAY_TIME); + reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); + reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD); + reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD); + reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD); + reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD); + if (!(reg_eac & BIT(31)) && + (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) && + (((reg_ebc & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else + return result; + + if (!(reg_eac & BIT(30)) && + (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) && + (((reg_ecc & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + return result; +} + +static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, + bool iqk_ok, long result[][8], + u8 final_candidate, bool btxonly) +{ + u32 oldval_0, x, tx0_a, reg; + long y, tx0_c; + + if (final_candidate == 0xFF) + return; + else if (iqk_ok) { + oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD) >> 22) & 0x3FF; + x = result[final_candidate][0]; + if ((x & 0x00000200) != 0) + x = x | 0xFFFFFC00; + tx0_a = (x * oldval_0) >> 8; + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31), + ((x * oldval_0 >> 7) & 0x1)); + y = result[final_candidate][1]; + if ((y & 0x00000200) != 0) + y = y | 0xFFFFFC00; + tx0_c = (y * oldval_0) >> 8; + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, + ((tx0_c & 0x3C0) >> 6)); + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, + (tx0_c & 0x3F)); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29), + ((y * oldval_0 >> 7) & 0x1)); + if (btxonly) + return; + reg = result[final_candidate][2]; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][3] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg); + reg = (result[final_candidate][3] >> 6) & 0xF; + rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg); + } +} + +static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw, + bool iqk_ok, long result[][8], + u8 final_candidate, 
bool btxonly) +{ + u32 oldval_1, x, tx1_a, reg; + long y, tx1_c; + + if (final_candidate == 0xFF) + return; + else if (iqk_ok) { + oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, + MASKDWORD) >> 22) & 0x3FF; + x = result[final_candidate][4]; + if ((x & 0x00000200) != 0) + x = x | 0xFFFFFC00; + tx1_a = (x * oldval_1) >> 8; + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27), + ((x * oldval_1 >> 7) & 0x1)); + y = result[final_candidate][5]; + if ((y & 0x00000200) != 0) + y = y | 0xFFFFFC00; + tx1_c = (y * oldval_1) >> 8; + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, + ((tx1_c & 0x3C0) >> 6)); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000, + (tx1_c & 0x3F)); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25), + ((y * oldval_1 >> 7) & 0x1)); + if (btxonly) + return; + reg = result[final_candidate][6]; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][7] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg); + reg = (result[final_candidate][7] >> 6) & 0xF; + rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg); + } +} + +static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw, + u32 *addareg, u32 *addabackup, + u32 registernum) +{ + u32 i; + + for (i = 0; i < registernum; i++) + addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD); +} + +static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) + macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]); + macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]); +} + +static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw, + u32 *addareg, u32 *addabackup, + u32 regiesternum) +{ + u32 i; + + for (i = 0; i < regiesternum; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]); +} + +static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) + rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]); + rtl_write_dword(rtlpriv, macreg[i], macbackup[i]); +} + +static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw, + u32 *addareg, bool is_patha_on, bool is2t) +{ + u32 pathOn; + u32 i; + + pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; + if (false == is2t) { + pathOn = 0x0bdb25a0; + rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); + } else { + rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn); + } + + for (i = 1; i < IQK_ADDA_REG_NUM; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn); +} + +static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw, + u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + rtl_write_byte(rtlpriv, macreg[0], 0x3F); + + for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) + rtl_write_byte(rtlpriv, macreg[i], + (u8) (macbackup[i] & (~BIT(3)))); + rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5)))); +} + +static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw) +{ + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0); + rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); +} + +static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode) +{ + u32 mode; + + mode = pi_mode ? 
0x01000100 : 0x01000000; + rtl_set_bbreg(hw, 0x820, MASKDWORD, mode); + rtl_set_bbreg(hw, 0x828, MASKDWORD, mode); +} + +static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw, + long result[][8], u8 c1, u8 c2) +{ + u32 i, j, diff, simularity_bitmap, bound; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + u8 final_candidate[2] = { 0xFF, 0xFF }; + bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version); + + if (is2t) + bound = 8; + else + bound = 4; + + simularity_bitmap = 0; + + for (i = 0; i < bound; i++) { + diff = (result[c1][i] > result[c2][i]) ? + (result[c1][i] - result[c2][i]) : + (result[c2][i] - result[c1][i]); + + if (diff > MAX_TOLERANCE) { + if ((i == 2 || i == 6) && !simularity_bitmap) { + if (result[c1][i] + result[c1][i + 1] == 0) + final_candidate[(i / 4)] = c2; + else if (result[c2][i] + result[c2][i + 1] == 0) + final_candidate[(i / 4)] = c1; + else + simularity_bitmap = simularity_bitmap | + (1 << i); + } else + simularity_bitmap = + simularity_bitmap | (1 << i); + } + } + + if (simularity_bitmap == 0) { + for (i = 0; i < (bound / 4); i++) { + if (final_candidate[i] != 0xFF) { + for (j = i * 4; j < (i + 1) * 4 - 2; j++) + result[3][j] = + result[final_candidate[i]][j]; + bresult = false; + } + } + return bresult; + } else if (!(simularity_bitmap & 0x0F)) { + for (i = 0; i < 4; i++) + result[3][i] = result[c1][i]; + return false; + } else if (!(simularity_bitmap & 0xF0) && is2t) { + for (i = 4; i < 8; i++) + result[3][i] = result[c1][i]; + return false; + } else { + return false; + } + +} + +static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, + long result[][8], u8 t, bool is2t) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 i; + u8 patha_ok, pathb_ok; + u32 adda_reg[IQK_ADDA_REG_NUM] = { + 0x85c, 0xe6c, 0xe70, 0xe74, + 0xe78, 0xe7c, 0xe80, 0xe84, + 0xe88, 0xe8c, 0xed0, 0xed4, + 0xed8, 0xedc, 0xee0, 0xeec + }; + + u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { + 0x522, 0x550, 0x551, 0x040 + }; + + const u32 retrycount = 2; + + u32 bbvalue; + + if (t == 0) { + bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD); + + _rtl92c_phy_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, 16); + _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + } + _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t); + if (t == 0) { + rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); + } + if (!rtlphy->rfpi_enable) + _rtl92c_phy_pi_mode_switch(hw, true); + if (t == 0) { + rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD); + rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD); + rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD); + } + rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600); + rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4); + rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000); + if (is2t) { + rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); + rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000); + } + _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000); + if (is2t) + rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000); + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); + rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800); + for (i = 0; i < retrycount; i++) { + patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t); + if (patha_ok == 0x03) { + result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) & + 0x3FF0000) >> 16; + result[t][1] = (rtl_get_bbreg(hw, 
0xe9c, MASKDWORD) & + 0x3FF0000) >> 16; + result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) & + 0x3FF0000) >> 16; + result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) & + 0x3FF0000) >> 16; + break; + } else if (i == (retrycount - 1) && patha_ok == 0x01) + result[t][0] = (rtl_get_bbreg(hw, 0xe94, + MASKDWORD) & 0x3FF0000) >> + 16; + result[t][1] = + (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16; + + } + + if (is2t) { + _rtl92c_phy_path_a_standby(hw); + _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t); + for (i = 0; i < retrycount; i++) { + pathb_ok = _rtl92c_phy_path_b_iqk(hw); + if (pathb_ok == 0x03) { + result[t][4] = (rtl_get_bbreg(hw, + 0xeb4, + MASKDWORD) & + 0x3FF0000) >> 16; + result[t][5] = + (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & + 0x3FF0000) >> 16; + result[t][6] = + (rtl_get_bbreg(hw, 0xec4, MASKDWORD) & + 0x3FF0000) >> 16; + result[t][7] = + (rtl_get_bbreg(hw, 0xecc, MASKDWORD) & + 0x3FF0000) >> 16; + break; + } else if (i == (retrycount - 1) && pathb_ok == 0x01) { + result[t][4] = (rtl_get_bbreg(hw, + 0xeb4, + MASKDWORD) & + 0x3FF0000) >> 16; + } + result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & + 0x3FF0000) >> 16; + } + } + rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04); + rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874); + rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08); + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0); + rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3); + if (is2t) + rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3); + if (t != 0) { + if (!rtlphy->rfpi_enable) + _rtl92c_phy_pi_mode_switch(hw, false); + _rtl92c_phy_reload_adda_registers(hw, adda_reg, + rtlphy->adda_backup, 16); + _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + } +} + +static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, + char delta, bool is2t) +{ + /* This routine is deliberately dummied out for later fixes */ +#if 0 + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + + u32 reg_d[PATH_NUM]; + u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound; + + u32 bb_backup[APK_BB_REG_NUM]; + u32 bb_reg[APK_BB_REG_NUM] = { + 0x904, 0xc04, 0x800, 0xc08, 0x874 + }; + u32 bb_ap_mode[APK_BB_REG_NUM] = { + 0x00000020, 0x00a05430, 0x02040000, + 0x000800e4, 0x00204000 + }; + u32 bb_normal_ap_mode[APK_BB_REG_NUM] = { + 0x00000020, 0x00a05430, 0x02040000, + 0x000800e4, 0x22204000 + }; + + u32 afe_backup[APK_AFE_REG_NUM]; + u32 afe_reg[APK_AFE_REG_NUM] = { + 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, + 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, + 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, + 0xeec + }; + + u32 mac_backup[IQK_MAC_REG_NUM]; + u32 mac_reg[IQK_MAC_REG_NUM] = { + 0x522, 0x550, 0x551, 0x040 + }; + + u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = { + {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c}, + {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e} + }; + + u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = { + {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c}, + {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c} + }; + + u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = { + {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d}, + {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050} + }; + + u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = { + {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}, + {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a} + }; + + u32 afe_on_off[PATH_NUM] = { + 0x04db25a4, 0x0b1b25a4 + }; + + u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c }; + + u32 
apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 }; + + u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 }; + + u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 }; + + const char apk_delta_mapping[APK_BB_REG_NUM][13] = { + {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, + {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, + {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, + {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6}, + {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0} + }; + + const u32 apk_normal_setting_value_1[13] = { + 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28, + 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3, + 0x12680000, 0x00880000, 0x00880000 + }; + + const u32 apk_normal_setting_value_2[16] = { + 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3, + 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025, + 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008, + 0x00050006 + }; + + const u32 apk_result[PATH_NUM][APK_BB_REG_NUM]; + + long bb_offset, delta_v, delta_offset; + + if (!is2t) + pathbound = 1; + + for (index = 0; index < PATH_NUM; index++) { + apk_offset[index] = apk_normal_offset[index]; + apk_value[index] = apk_normal_value[index]; + afe_on_off[index] = 0x6fdb25a4; + } + + for (index = 0; index < APK_BB_REG_NUM; index++) { + for (path = 0; path < pathbound; path++) { + apk_rf_init_value[path][index] = + apk_normal_rf_init_value[path][index]; + apk_rf_value_0[path][index] = + apk_normal_rf_value_0[path][index]; + } + bb_ap_mode[index] = bb_normal_ap_mode[index]; + + apkbound = 6; + } + + for (index = 0; index < APK_BB_REG_NUM; index++) { + if (index == 0) + continue; + bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD); + } + + _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup); + + _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16); + + for (path = 0; path < pathbound; path++) { + if (path == RF90_PATH_A) { + offset = 0xb00; + for (index = 0; index < 11; index++) { + rtl_set_bbreg(hw, offset, MASKDWORD, + apk_normal_setting_value_1 + [index]); + + offset += 0x04; + } + + rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000); + + offset = 0xb68; + for (; index < 13; index++) { + rtl_set_bbreg(hw, offset, MASKDWORD, + apk_normal_setting_value_1 + [index]); + + offset += 0x04; + } + + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000); + + offset = 0xb00; + for (index = 0; index < 16; index++) { + rtl_set_bbreg(hw, offset, MASKDWORD, + apk_normal_setting_value_2 + [index]); + + offset += 0x04; + } + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); + } else if (path == RF90_PATH_B) { + offset = 0xb70; + for (index = 0; index < 10; index++) { + rtl_set_bbreg(hw, offset, MASKDWORD, + apk_normal_setting_value_1 + [index]); + + offset += 0x04; + } + rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000); + rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000); + + offset = 0xb68; + index = 11; + for (; index < 13; index++) { + rtl_set_bbreg(hw, offset, MASKDWORD, + apk_normal_setting_value_1 + [index]); + + offset += 0x04; + } + + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000); + + offset = 0xb60; + for (index = 0; index < 16; index++) { + rtl_set_bbreg(hw, offset, MASKDWORD, + apk_normal_setting_value_2 + [index]); + + offset += 0x04; + } + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); + } + + reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path, + 0xd, MASKDWORD); + + for (index = 0; index < APK_AFE_REG_NUM; index++) + rtl_set_bbreg(hw, afe_reg[index], MASKDWORD, + afe_on_off[path]); + + if (path == RF90_PATH_A) { + for 
(index = 0; index < APK_BB_REG_NUM; index++) { + if (index == 0) + continue; + rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, + bb_ap_mode[index]); + } + } + + _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup); + + if (path == 0) { + rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000); + } else { + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD, + 0x10000); + rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD, + 0x1000f); + rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD, + 0x20103); + } + + delta_offset = ((delta + 14) / 2); + if (delta_offset < 0) + delta_offset = 0; + else if (delta_offset > 12) + delta_offset = 12; + + for (index = 0; index < APK_BB_REG_NUM; index++) { + if (index != 1) + continue; + + tmpreg = apk_rf_init_value[path][index]; + + if (!rtlefuse->apk_thermalmeterignore) { + bb_offset = (tmpreg & 0xF0000) >> 16; + + if (!(tmpreg & BIT(15))) + bb_offset = -bb_offset; + + delta_v = + apk_delta_mapping[index][delta_offset]; + + bb_offset += delta_v; + + if (bb_offset < 0) { + tmpreg = tmpreg & (~BIT(15)); + bb_offset = -bb_offset; + } else { + tmpreg = tmpreg | BIT(15); + } + + tmpreg = + (tmpreg & 0xFFF0FFFF) | (bb_offset << 16); + } + + rtl_set_rfreg(hw, (enum radio_path)path, 0xc, + MASKDWORD, 0x8992e); + rtl_set_rfreg(hw, (enum radio_path)path, 0x0, + MASKDWORD, apk_rf_value_0[path][index]); + rtl_set_rfreg(hw, (enum radio_path)path, 0xd, + MASKDWORD, tmpreg); + + i = 0; + do { + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000); + rtl_set_bbreg(hw, apk_offset[path], + MASKDWORD, apk_value[0]); + RTPRINT(rtlpriv, FINIT, INIT_IQK, + ("PHY_APCalibrate() offset 0x%x " + "value 0x%x\n", + apk_offset[path], + rtl_get_bbreg(hw, apk_offset[path], + MASKDWORD))); + + mdelay(3); + + rtl_set_bbreg(hw, apk_offset[path], + MASKDWORD, apk_value[1]); + RTPRINT(rtlpriv, FINIT, INIT_IQK, + ("PHY_APCalibrate() offset 0x%x " + "value 0x%x\n", + apk_offset[path], + rtl_get_bbreg(hw, apk_offset[path], + MASKDWORD))); + + mdelay(20); + + rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); + + if (path == RF90_PATH_A) + tmpreg = rtl_get_bbreg(hw, 0xbd8, + 0x03E00000); + else + tmpreg = rtl_get_bbreg(hw, 0xbd8, + 0xF8000000); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + ("PHY_APCalibrate() offset " + "0xbd8[25:21] %x\n", tmpreg)); + + i++; + + } while (tmpreg > apkbound && i < 4); + + apk_result[path][index] = tmpreg; + } + } + + _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup); + + for (index = 0; index < APK_BB_REG_NUM; index++) { + if (index == 0) + continue; + rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]); + } + + _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16); + + for (path = 0; path < pathbound; path++) { + rtl_set_rfreg(hw, (enum radio_path)path, 0xd, + MASKDWORD, reg_d[path]); + + if (path == RF90_PATH_B) { + rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD, + 0x1000f); + rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD, + 0x20101); + } + + if (apk_result[path][1] > 6) + apk_result[path][1] = 6; + } + + for (path = 0; path < pathbound; path++) { + rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD, + ((apk_result[path][1] << 15) | + (apk_result[path][1] << 10) | + (apk_result[path][1] << 5) | + apk_result[path][1])); + + if (path == RF90_PATH_A) + rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD, + ((apk_result[path][1] << 15) | + (apk_result[path][1] << 10) | + (0x00 << 5) | 0x05)); + else + rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD, + ((apk_result[path][1] << 15) | + (apk_result[path][1] << 10) | + (0x02 << 5) | 0x05)); + + 
rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD, + ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) | + 0x08)); + + } + + rtlphy->apk_done = true; +#endif +} + +static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, + bool bmain, bool is2t) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (is_hal_stop(rtlhal)) { + rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01); + rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); + } + if (is2t) { + if (bmain) + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(6), 0x1); + else + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(5) | BIT(6), 0x2); + } else { + if (bmain) + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2); + else + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1); + + } +} + +#undef IQK_ADDA_REG_NUM +#undef IQK_DELAY_TIME + +void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + long result[4][8]; + u8 i, final_candidate; + bool patha_ok, pathb_ok; + long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, + reg_ecc, reg_tmp = 0; + bool is12simular, is13simular, is23simular; + bool start_conttx = false, singletone = false; + u32 iqk_bb_reg[10] = { + ROFDM0_XARXIQIMBALANCE, + ROFDM0_XBRXIQIMBALANCE, + ROFDM0_ECCATHRESHOLD, + ROFDM0_AGCRSSITABLE, + ROFDM0_XATXIQIMBALANCE, + ROFDM0_XBTXIQIMBALANCE, + ROFDM0_XCTXIQIMBALANCE, + ROFDM0_XCTXAFE, + ROFDM0_XDTXAFE, + ROFDM0_RXIQEXTANTA + }; + + if (recovery) { + _rtl92c_phy_reload_adda_registers(hw, + iqk_bb_reg, + rtlphy->iqk_bb_backup, 10); + return; + } + if (start_conttx || singletone) + return; + for (i = 0; i < 8; i++) { + result[0][i] = 0; + result[1][i] = 0; + result[2][i] = 0; + result[3][i] = 0; + } + final_candidate = 0xff; + patha_ok = false; + pathb_ok = false; + is12simular = false; + is23simular = false; + is13simular = false; + for (i = 0; i < 3; i++) { + if (IS_92C_SERIAL(rtlhal->version)) + _rtl92c_phy_iq_calibrate(hw, result, i, true); + else + _rtl92c_phy_iq_calibrate(hw, result, i, false); + if (i == 1) { + is12simular = _rtl92c_phy_simularity_compare(hw, + result, 0, + 1); + if (is12simular) { + final_candidate = 0; + break; + } + } + if (i == 2) { + is13simular = _rtl92c_phy_simularity_compare(hw, + result, 0, + 2); + if (is13simular) { + final_candidate = 0; + break; + } + is23simular = _rtl92c_phy_simularity_compare(hw, + result, 1, + 2); + if (is23simular) + final_candidate = 1; + else { + for (i = 0; i < 8; i++) + reg_tmp += result[3][i]; + + if (reg_tmp != 0) + final_candidate = 3; + else + final_candidate = 0xFF; + } + } + } + for (i = 0; i < 4; i++) { + reg_e94 = result[i][0]; + reg_e9c = result[i][1]; + reg_ea4 = result[i][2]; + reg_eac = result[i][3]; + reg_eb4 = result[i][4]; + reg_ebc = result[i][5]; + reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; + } + if (final_candidate != 0xff) { + rtlphy->reg_e94 = reg_e94 = result[final_candidate][0]; + rtlphy->reg_e9c = reg_e9c = result[final_candidate][1]; + reg_ea4 = result[final_candidate][2]; + reg_eac = result[final_candidate][3]; + rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4]; + rtlphy->reg_ebc = reg_ebc = result[final_candidate][5]; + reg_ec4 = result[final_candidate][6]; + reg_ecc = result[final_candidate][7]; + patha_ok = pathb_ok = true; + } else { + rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100; + rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0; + } + if (reg_e94 != 0) /*&&(reg_ea4 != 0) */ + 
_rtl92c_phy_path_a_fill_iqk_matrix(hw, patha_ok, result, + final_candidate, + (reg_ea4 == 0)); + if (IS_92C_SERIAL(rtlhal->version)) { + if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */ + _rtl92c_phy_path_b_fill_iqk_matrix(hw, pathb_ok, + result, + final_candidate, + (reg_ec4 == 0)); + } + _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, 10); +} +EXPORT_SYMBOL(rtl92c_phy_iq_calibrate); + +void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool start_conttx = false, singletone = false; + + if (start_conttx || singletone) + return; + if (IS_92C_SERIAL(rtlhal->version)) + rtlpriv->cfg->ops->phy_lc_calibrate(hw, true); + else + rtlpriv->cfg->ops->phy_lc_calibrate(hw, false); +} +EXPORT_SYMBOL(rtl92c_phy_lc_calibrate); + +void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (rtlphy->apk_done) + return; + if (IS_92C_SERIAL(rtlhal->version)) + _rtl92c_phy_ap_calibrate(hw, delta, true); + else + _rtl92c_phy_ap_calibrate(hw, delta, false); +} +EXPORT_SYMBOL(rtl92c_phy_ap_calibrate); + +void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (IS_92C_SERIAL(rtlhal->version)) + _rtl92c_phy_set_rfpath_switch(hw, bmain, true); + else + _rtl92c_phy_set_rfpath_switch(hw, bmain, false); +} +EXPORT_SYMBOL(rtl92c_phy_set_rfpath_switch); + +bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + bool postprocessing = false; + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + ("-->IO Cmd(%#x), set_io_inprogress(%d)\n", + iotype, rtlphy->set_io_inprogress)); + do { + switch (iotype) { + case IO_CMD_RESUME_DM_BY_SCAN: + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + ("[IO CMD] Resume DM after scan.\n")); + postprocessing = true; + break; + case IO_CMD_PAUSE_DM_BY_SCAN: + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + ("[IO CMD] Pause DM before scan.\n")); + postprocessing = true; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + } + } while (false); + if (postprocessing && !rtlphy->set_io_inprogress) { + rtlphy->set_io_inprogress = true; + rtlphy->current_io_type = iotype; + } else { + return false; + } + rtl92c_phy_set_io(hw); + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype)); + return true; +} +EXPORT_SYMBOL(rtl92c_phy_set_io_cmd); + +void rtl92c_phy_set_io(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + ("--->Cmd(%#x), set_io_inprogress(%d)\n", + rtlphy->current_io_type, rtlphy->set_io_inprogress)); + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1; + rtl92c_dm_write_dig(hw); + rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); + break; + case IO_CMD_PAUSE_DM_BY_SCAN: + rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue; + dm_digtable.cur_igvalue = 0x17; + rtl92c_dm_write_dig(hw); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + } + rtlphy->set_io_inprogress = false; + RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, + ("<---(%#x)\n", 
rtlphy->current_io_type)); +} +EXPORT_SYMBOL(rtl92c_phy_set_io); + +void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); +} +EXPORT_SYMBOL(rtl92ce_phy_set_rf_on); + +void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw) +{ + u32 u4b_tmp; + u8 delay = 5; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); + u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK); + while (u4b_tmp != 0 && delay > 0) { + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); + u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK); + delay--; + } + if (delay == 0) { + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + ("Switch RF timeout !!!.\n")); + return; + } + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); +} +EXPORT_SYMBOL(_rtl92c_phy_set_rf_sleep); diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h new file mode 100644 index 000000000000..53ffb0981586 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h @@ -0,0 +1,246 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#ifndef __RTL92C_PHY_H__ +#define __RTL92C_PHY_H__ + +#define MAX_PRECMD_CNT 16 +#define MAX_RFDEPENDCMD_CNT 16 +#define MAX_POSTCMD_CNT 16 + +#define MAX_DOZE_WAITING_TIMES_9x 64 + +#define RT_CANNOT_IO(hw) false +#define HIGHPOWER_RADIOA_ARRAYLEN 22 + +#define MAX_TOLERANCE 5 +#define IQK_DELAY_TIME 1 + +#define APK_BB_REG_NUM 5 +#define APK_AFE_REG_NUM 16 +#define APK_CURVE_REG_NUM 4 +#define PATH_NUM 2 + +#define LOOP_LIMIT 5 +#define MAX_STALL_TIME 50 +#define AntennaDiversityValue 0x80 +#define MAX_TXPWR_IDX_NMODE_92S 63 +#define Reset_Cnt_Limit 3 + +#define IQK_ADDA_REG_NUM 16 +#define IQK_MAC_REG_NUM 4 + +#define RF90_PATH_MAX 2 + +#define CT_OFFSET_MAC_ADDR 0X16 + +#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A +#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60 +#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF 0x66 +#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69 +#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C + +#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F +#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72 + +#define CT_OFFSET_CHANNEL_PLAH 0x75 +#define CT_OFFSET_THERMAL_METER 0x78 +#define CT_OFFSET_RF_OPTION 0x79 +#define CT_OFFSET_VERSION 0x7E +#define CT_OFFSET_CUSTOMER_ID 0x7F + +#define RTL92C_MAX_PATH_NUM 2 +#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255 +enum swchnlcmd_id { + CMDID_END, + CMDID_SET_TXPOWEROWER_LEVEL, + CMDID_BBREGWRITE10, + CMDID_WRITEPORT_ULONG, + CMDID_WRITEPORT_USHORT, + CMDID_WRITEPORT_UCHAR, + CMDID_RF_WRITEREG, +}; + +struct swchnlcmd { + enum swchnlcmd_id cmdid; + u32 para1; + u32 para2; + u32 msdelay; +}; + +enum hw90_block_e { + HW90_BLOCK_MAC = 0, + HW90_BLOCK_PHY0 = 1, + HW90_BLOCK_PHY1 = 2, + HW90_BLOCK_RF = 3, + HW90_BLOCK_MAXIMUM = 4, +}; + +enum baseband_config_type { + BASEBAND_CONFIG_PHY_REG = 0, + BASEBAND_CONFIG_AGC_TAB = 1, +}; + +enum ra_offset_area { + RA_OFFSET_LEGACY_OFDM1, + RA_OFFSET_LEGACY_OFDM2, + RA_OFFSET_HT_OFDM1, + RA_OFFSET_HT_OFDM2, + RA_OFFSET_HT_OFDM3, + RA_OFFSET_HT_OFDM4, + RA_OFFSET_HT_CCK, +}; + +enum antenna_path { + ANTENNA_NONE, + ANTENNA_D, + ANTENNA_C, + ANTENNA_CD, + ANTENNA_B, + ANTENNA_BD, + ANTENNA_BC, + ANTENNA_BCD, + ANTENNA_A, + ANTENNA_AD, + ANTENNA_AC, + ANTENNA_ACD, + ANTENNA_AB, + ANTENNA_ABD, + ANTENNA_ABC, + ANTENNA_ABCD +}; + +struct r_antenna_select_ofdm { + u32 r_tx_antenna:4; + u32 r_ant_l:4; + u32 r_ant_non_ht:4; + u32 r_ant_ht1:4; + u32 r_ant_ht2:4; + u32 r_ant_ht_s1:4; + u32 r_ant_non_ht_s1:4; + u32 ofdm_txsc:2; + u32 reserved:2; +}; + +struct r_antenna_select_cck { + u8 r_cckrx_enable_2:2; + u8 r_cckrx_enable:2; + u8 r_ccktx_enable:4; +}; + +struct efuse_contents { + u8 mac_addr[ETH_ALEN]; + u8 cck_tx_power_idx[6]; + u8 ht40_1s_tx_power_idx[6]; + u8 ht40_2s_tx_power_idx_diff[3]; + u8 ht20_tx_power_idx_diff[3]; + u8 ofdm_tx_power_idx_diff[3]; + u8 ht40_max_power_offset[3]; + u8 ht20_max_power_offset[3]; + u8 channel_plan; + u8 thermal_meter; + u8 rf_option[5]; + u8 version; + u8 oem_id; + u8 regulatory; +}; + +struct tx_power_struct { + u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 legacy_ht_txpowerdiff; + u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER]; + u8 pwrgroup_cnt; + u32 mcs_original_offset[4][16]; 
+}; + +extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask); +extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data); +extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask); +extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, + u32 bitmask, u32 data); +extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw); +extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw); +extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw); +extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw); +extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, + long *powerlevel); +extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel); +extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, + long power_indbm); +extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, + u8 operation); +extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw); +extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type); +extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw); +extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw); +extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); +extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, + u16 beaconinterval); +void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); +void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); +bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); +extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, + u32 rfpath); +extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); +void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw); +void rtl92c_phy_set_io(struct ieee80211_hw *hw); +void rtl92c_bb_block_on(struct ieee80211_hw *hw); +u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask); +long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + u8 txpwridx); +u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, + enum wireless_mode wirelessmode, + long power_indbm); +void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); +static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, + u32 cmdtableidx, u32 cmdtablesz, + enum swchnlcmd_id cmdid, u32 para1, + u32 para2, u32 msdelay); +static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, + u8 channel, u8 *stage, u8 *step, + u32 *delay); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile index 0f0be7c763b8..c0cb0cfe7d37 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile @@ -1,6 +1,5 @@ rtl8192ce-objs := \ dm.o \ - fw.o \ hw.o \ led.o \ phy.o \ @@ -10,3 +9,5 @@ rtl8192ce-objs := \ trx.o obj-$(CONFIG_RTL8192CE) += rtl8192ce.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h index 83cd64895292..2f577c8828fc 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h +++ 
b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h @@ -121,11 +121,37 @@ #define CHIP_92C 0x01 #define CHIP_88C 0x00 +/* Add vendor information into chip version definition. + * Add UMC B-Cut and RTL8723 chip info definition. + * + * BIT 7 Reserved + * BIT 6 UMC BCut + * BIT 5 Manufacturer(TSMC/UMC) + * BIT 4 TEST/NORMAL + * BIT 3 8723 Version + * BIT 2 8723? + * BIT 1 1T2R? + * BIT 0 88C/92C +*/ + enum version_8192c { VERSION_A_CHIP_92C = 0x01, VERSION_A_CHIP_88C = 0x00, VERSION_B_CHIP_92C = 0x11, VERSION_B_CHIP_88C = 0x10, + VERSION_TEST_CHIP_88C = 0x00, + VERSION_TEST_CHIP_92C = 0x01, + VERSION_NORMAL_TSMC_CHIP_88C = 0x10, + VERSION_NORMAL_TSMC_CHIP_92C = 0x11, + VERSION_NORMAL_TSMC_CHIP_92C_1T2R = 0x13, + VERSION_NORMAL_UMC_CHIP_88C_A_CUT = 0x30, + VERSION_NORMAL_UMC_CHIP_92C_A_CUT = 0x31, + VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT = 0x33, + VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT = 0x34, + VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT = 0x3c, + VERSION_NORMAL_UMC_CHIP_88C_B_CUT = 0x70, + VERSION_NORMAL_UMC_CHIP_92C_B_CUT = 0x71, + VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT = 0x73, VERSION_UNKNOWN = 0x88, }; @@ -254,4 +280,122 @@ struct h2c_cmd_8192c { u8 *p_cmdbuffer; }; +static inline u8 _rtl92c_get_chnl_group(u8 chnl) +{ + u8 group = 0; + + if (chnl < 3) + group = 0; + else if (chnl < 9) + group = 1; + else + group = 2; + + return group; +} + +/* NOTE: reference to rtl8192c_rates struct */ +static inline int _rtl92c_rate_mapping(struct ieee80211_hw *hw, bool isHT, + u8 desc_rate, bool first_ampdu) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int rate_idx = 0; + + if (first_ampdu) { + if (false == isHT) { + switch (desc_rate) { + case DESC92C_RATE1M: + rate_idx = 0; + break; + case DESC92C_RATE2M: + rate_idx = 1; + break; + case DESC92C_RATE5_5M: + rate_idx = 2; + break; + case DESC92C_RATE11M: + rate_idx = 3; + break; + case DESC92C_RATE6M: + rate_idx = 4; + break; + case DESC92C_RATE9M: + rate_idx = 5; + break; + case DESC92C_RATE12M: + rate_idx = 6; + break; + case DESC92C_RATE18M: + rate_idx = 7; + break; + case DESC92C_RATE24M: + rate_idx = 8; + break; + case DESC92C_RATE36M: + rate_idx = 9; + break; + case DESC92C_RATE48M: + rate_idx = 10; + break; + case DESC92C_RATE54M: + rate_idx = 11; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, + ("Rate %d is not support, set to " + "1M rate.\n", desc_rate)); + rate_idx = 0; + break; + } + } else { + rate_idx = 11; + } + return rate_idx; + } + switch (desc_rate) { + case DESC92C_RATE1M: + rate_idx = 0; + break; + case DESC92C_RATE2M: + rate_idx = 1; + break; + case DESC92C_RATE5_5M: + rate_idx = 2; + break; + case DESC92C_RATE11M: + rate_idx = 3; + break; + case DESC92C_RATE6M: + rate_idx = 4; + break; + case DESC92C_RATE9M: + rate_idx = 5; + break; + case DESC92C_RATE12M: + rate_idx = 6; + break; + case DESC92C_RATE18M: + rate_idx = 7; + break; + case DESC92C_RATE24M: + rate_idx = 8; + break; + case DESC92C_RATE36M: + rate_idx = 9; + break; + case DESC92C_RATE48M: + rate_idx = 10; + break; + case DESC92C_RATE54M: + rate_idx = 11; + break; + /* TODO: How to mapping MCS rate? 
*/ + /* NOTE: referenc to __ieee80211_rx */ + default: + rate_idx = 11; + break; + } + return rate_idx; +} + #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c index 62e7c64e087b..7d76504df4d1 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c @@ -33,487 +33,15 @@ #include "def.h" #include "phy.h" #include "dm.h" -#include "fw.h" -struct dig_t dm_digtable; -static struct ps_t dm_pstable; - -static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = { - 0x7f8001fe, - 0x788001e2, - 0x71c001c7, - 0x6b8001ae, - 0x65400195, - 0x5fc0017f, - 0x5a400169, - 0x55400155, - 0x50800142, - 0x4c000130, - 0x47c0011f, - 0x43c0010f, - 0x40000100, - 0x3c8000f2, - 0x390000e4, - 0x35c000d7, - 0x32c000cb, - 0x300000c0, - 0x2d4000b5, - 0x2ac000ab, - 0x288000a2, - 0x26000098, - 0x24000090, - 0x22000088, - 0x20000080, - 0x1e400079, - 0x1c800072, - 0x1b00006c, - 0x19800066, - 0x18000060, - 0x16c0005b, - 0x15800056, - 0x14400051, - 0x1300004c, - 0x12000048, - 0x11000044, - 0x10000040, -}; - -static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = { - {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, - {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, - {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, - {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, - {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, - {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, - {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, - {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, - {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, - {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, - {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, - {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, - {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, - {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, - {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, - {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, - {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, - {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, - {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, - {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, - {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, - {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, - {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, - {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, - {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, - {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, - {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, - {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, - {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, - {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, - {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, - {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, - {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} -}; - -static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = { - {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, - {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, - {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, - {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, - {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, - {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, - {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, - {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, - {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, - {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, - {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, - {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, 
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, - {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, - {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, - {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, - {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, - {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, - {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, - {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, - {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, - {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, - {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, - {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, - {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, - {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, - {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, - {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, - {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, - {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, - {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, - {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, - {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} -}; - -static void rtl92c_dm_diginit(struct ieee80211_hw *hw) -{ - dm_digtable.dig_enable_flag = true; - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; - dm_digtable.cur_igvalue = 0x20; - dm_digtable.pre_igvalue = 0x0; - dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; - dm_digtable.presta_connectstate = DIG_STA_DISCONNECT; - dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT; - dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; - dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; - dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; - dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; - dm_digtable.rx_gain_range_max = DM_DIG_MAX; - dm_digtable.rx_gain_range_min = DM_DIG_MIN; - dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; - dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; - dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; - dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX; - dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; -} - -static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - long rssi_val_min = 0; - - if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) && - (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) { - if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0) - rssi_val_min = - (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb > - rtlpriv->dm.undecorated_smoothed_pwdb) ? 
- rtlpriv->dm.undecorated_smoothed_pwdb : - rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; - else - rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; - } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT || - dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) { - rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; - } else if (dm_digtable.curmultista_connectstate == - DIG_MULTISTA_CONNECT) { - rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; - } - - return (u8) rssi_val_min; -} - -static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) -{ - u32 ret_value; - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); - - ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD); - falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16); - - ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD); - falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff); - falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16); - - ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD); - falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff); - falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail + - falsealm_cnt->cnt_rate_illegal + - falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail; - - rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1); - ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0); - falsealm_cnt->cnt_cck_fail = ret_value; - - ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3); - falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8; - falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail + - falsealm_cnt->cnt_rate_illegal + - falsealm_cnt->cnt_crc8_fail + - falsealm_cnt->cnt_mcs_fail + - falsealm_cnt->cnt_cck_fail); - - rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1); - rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0); - rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0); - rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2); - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - ("cnt_parity_fail = %d, cnt_rate_illegal = %d, " - "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n", - falsealm_cnt->cnt_parity_fail, - falsealm_cnt->cnt_rate_illegal, - falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail)); - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n", - falsealm_cnt->cnt_ofdm_fail, - falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all)); -} - -static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u8 value_igi = dm_digtable.cur_igvalue; - - if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) - value_igi--; - else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1) - value_igi += 0; - else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2) - value_igi++; - else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2) - value_igi += 2; - if (value_igi > DM_DIG_FA_UPPER) - value_igi = DM_DIG_FA_UPPER; - else if (value_igi < DM_DIG_FA_LOWER) - value_igi = DM_DIG_FA_LOWER; - if (rtlpriv->falsealm_cnt.cnt_all > 10000) - value_igi = 0x32; - - dm_digtable.cur_igvalue = value_igi; - rtl92c_dm_write_dig(hw); -} - -static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) { - if ((dm_digtable.backoff_val - 2) < - dm_digtable.backoff_val_range_min) - dm_digtable.backoff_val = - dm_digtable.backoff_val_range_min; - 
else - dm_digtable.backoff_val -= 2; - } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) { - if ((dm_digtable.backoff_val + 2) > - dm_digtable.backoff_val_range_max) - dm_digtable.backoff_val = - dm_digtable.backoff_val_range_max; - else - dm_digtable.backoff_val += 2; - } - - if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) > - dm_digtable.rx_gain_range_max) - dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max; - else if ((dm_digtable.rssi_val_min + 10 - - dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min) - dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min; - else - dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 - - dm_digtable.backoff_val; - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - ("rssi_val_min = %x backoff_val %x\n", - dm_digtable.rssi_val_min, dm_digtable.backoff_val)); - - rtl92c_dm_write_dig(hw); -} - -static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw) -{ - static u8 binitialized; /* initialized to false */ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; - bool b_multi_sta = false; - - if (mac->opmode == NL80211_IFTYPE_ADHOC) - b_multi_sta = true; - - if ((b_multi_sta == false) || (dm_digtable.cursta_connectctate != - DIG_STA_DISCONNECT)) { - binitialized = false; - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; - return; - } else if (binitialized == false) { - binitialized = true; - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; - dm_digtable.cur_igvalue = 0x20; - rtl92c_dm_write_dig(hw); - } - - if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) { - if ((rssi_strength < dm_digtable.rssi_lowthresh) && - (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) { - - if (dm_digtable.dig_ext_port_stage == - DIG_EXT_PORT_STAGE_2) { - dm_digtable.cur_igvalue = 0x20; - rtl92c_dm_write_dig(hw); - } - - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1; - } else if (rssi_strength > dm_digtable.rssi_highthresh) { - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2; - rtl92c_dm_ctrl_initgain_by_fa(hw); - } - } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) { - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; - dm_digtable.cur_igvalue = 0x20; - rtl92c_dm_write_dig(hw); - } - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - ("curmultista_connectstate = " - "%x dig_ext_port_stage %x\n", - dm_digtable.curmultista_connectstate, - dm_digtable.dig_ext_port_stage)); -} - -static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - ("presta_connectstate = %x," - " cursta_connectctate = %x\n", - dm_digtable.presta_connectstate, - dm_digtable.cursta_connectctate)); - - if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate - || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT - || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { - - if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { - dm_digtable.rssi_val_min = - rtl92c_dm_initial_gain_min_pwdb(hw); - rtl92c_dm_ctrl_initgain_by_rssi(hw); - } - } else { - dm_digtable.rssi_val_min = 0; - dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; - dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; - dm_digtable.cur_igvalue = 0x20; - dm_digtable.pre_igvalue = 0; - rtl92c_dm_write_dig(hw); - } -} - -static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) -{ 
- struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { - dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); - - if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { - if (dm_digtable.rssi_val_min <= 25) - dm_digtable.cur_cck_pd_state = - CCK_PD_STAGE_LowRssi; - else - dm_digtable.cur_cck_pd_state = - CCK_PD_STAGE_HighRssi; - } else { - if (dm_digtable.rssi_val_min <= 20) - dm_digtable.cur_cck_pd_state = - CCK_PD_STAGE_LowRssi; - else - dm_digtable.cur_cck_pd_state = - CCK_PD_STAGE_HighRssi; - } - } else { - dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; - } - - if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) { - if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { - if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800) - dm_digtable.cur_cck_fa_state = - CCK_FA_STAGE_High; - else - dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low; - - if (dm_digtable.pre_cck_fa_state != - dm_digtable.cur_cck_fa_state) { - if (dm_digtable.cur_cck_fa_state == - CCK_FA_STAGE_Low) - rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, - 0x83); - else - rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, - 0xcd); - - dm_digtable.pre_cck_fa_state = - dm_digtable.cur_cck_fa_state; - } - - rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40); - - if (IS_92C_SERIAL(rtlhal->version)) - rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, - MASKBYTE2, 0xd7); - } else { - rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd); - rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47); - - if (IS_92C_SERIAL(rtlhal->version)) - rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, - MASKBYTE2, 0xd3); - } - dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state; - } - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state)); - - RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, - ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version))); -} - -static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) -{ - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - - if (mac->act_scanning == true) - return; - - if ((mac->link_state > MAC80211_NOLINK) && - (mac->link_state < MAC80211_LINKED)) - dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT; - else if (mac->link_state >= MAC80211_LINKED) - dm_digtable.cursta_connectctate = DIG_STA_CONNECT; - else - dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; - - rtl92c_dm_initial_gain_sta(hw); - rtl92c_dm_initial_gain_multi_sta(hw); - rtl92c_dm_cck_packet_detection_thresh(hw); - - dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate; - -} - -static void rtl92c_dm_dig(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - if (rtlpriv->dm.b_dm_initialgain_enable == false) - return; - if (dm_digtable.dig_enable_flag == false) - return; - - rtl92c_dm_ctrl_initgain_by_twoport(hw); - -} - -static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->dm.bdynamic_txpower_enable = false; - - rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; - rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; -} - -static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw) +void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); long undecorated_smoothed_pwdb; - if (!rtlpriv->dm.bdynamic_txpower_enable) + if (!rtlpriv->dm.dynamic_txpower_enable) return; if 
(rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) { @@ -583,891 +111,3 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw) rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; } - -void rtl92c_dm_write_dig(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, - ("cur_igvalue = 0x%x, " - "pre_igvalue = 0x%x, backoff_val = %d\n", - dm_digtable.cur_igvalue, dm_digtable.pre_igvalue, - dm_digtable.backoff_val)); - - if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) { - rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, - dm_digtable.cur_igvalue); - rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, - dm_digtable.cur_igvalue); - - dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; - } -} - -static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff; - - u8 h2c_parameter[3] = { 0 }; - - return; - - if (tmpentry_max_pwdb != 0) { - rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = - tmpentry_max_pwdb; - } else { - rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0; - } - - if (tmpentry_min_pwdb != 0xff) { - rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = - tmpentry_min_pwdb; - } else { - rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0; - } - - h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF); - h2c_parameter[0] = 0; - - rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter); -} - -void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - rtlpriv->dm.bcurrent_turbo_edca = false; - rtlpriv->dm.bis_any_nonbepkts = false; - rtlpriv->dm.bis_cur_rdlstate = false; -} - -static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - static u64 last_txok_cnt; - static u64 last_rxok_cnt; - u64 cur_txok_cnt; - u64 cur_rxok_cnt; - u32 edca_be_ul = 0x5ea42b; - u32 edca_be_dl = 0x5ea42b; - - if (mac->opmode == NL80211_IFTYPE_ADHOC) - goto dm_checkedcaturbo_exit; - - if (mac->link_state != MAC80211_LINKED) { - rtlpriv->dm.bcurrent_turbo_edca = false; - return; - } - - if (!mac->ht_enable) { /*FIX MERGE */ - if (!(edca_be_ul & 0xffff0000)) - edca_be_ul |= 0x005e0000; - - if (!(edca_be_dl & 0xffff0000)) - edca_be_dl |= 0x005e0000; - } - - if ((!rtlpriv->dm.bis_any_nonbepkts) && - (!rtlpriv->dm.b_disable_framebursting)) { - cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; - cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; - if (cur_rxok_cnt > 4 * cur_txok_cnt) { - if (!rtlpriv->dm.bis_cur_rdlstate || - !rtlpriv->dm.bcurrent_turbo_edca) { - rtl_write_dword(rtlpriv, - REG_EDCA_BE_PARAM, - edca_be_dl); - rtlpriv->dm.bis_cur_rdlstate = true; - } - } else { - if (rtlpriv->dm.bis_cur_rdlstate || - !rtlpriv->dm.bcurrent_turbo_edca) { - rtl_write_dword(rtlpriv, - REG_EDCA_BE_PARAM, - edca_be_ul); - rtlpriv->dm.bis_cur_rdlstate = false; - } - } - rtlpriv->dm.bcurrent_turbo_edca = true; - } else { - if (rtlpriv->dm.bcurrent_turbo_edca) { - u8 tmp = AC0_BE; - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_AC_PARAM, - (u8 *) (&tmp)); - rtlpriv->dm.bcurrent_turbo_edca = false; - } - } - -dm_checkedcaturbo_exit: - rtlpriv->dm.bis_any_nonbepkts = false; - last_txok_cnt = rtlpriv->stats.txbytesunicast; - last_rxok_cnt = rtlpriv->stats.rxbytesunicast; -} - -static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw - *hw) -{ - struct rtl_priv *rtlpriv = 
rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 thermalvalue, delta, delta_lck, delta_iqk; - long ele_a, ele_d, temp_cck, val_x, value32; - long val_y, ele_c; - u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old; - int i; - bool is2t = IS_92C_SERIAL(rtlhal->version); - u8 txpwr_level[2] = {0, 0}; - u8 ofdm_min_index = 6, rf; - - rtlpriv->dm.btxpower_trackingInit = true; - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n")); - - thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f); - - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x " - "eeprom_thermalmeter 0x%x\n", - thermalvalue, rtlpriv->dm.thermalvalue, - rtlefuse->eeprom_thermalmeter)); - - rtl92c_phy_ap_calibrate(hw, (thermalvalue - - rtlefuse->eeprom_thermalmeter)); - if (is2t) - rf = 2; - else - rf = 1; - - if (thermalvalue) { - ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, - MASKDWORD) & MASKOFDM_D; - - for (i = 0; i < OFDM_TABLE_LENGTH; i++) { - if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { - ofdm_index_old[0] = (u8) i; - - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("Initial pathA ele_d reg0x%x = 0x%lx, " - "ofdm_index=0x%x\n", - ROFDM0_XATXIQIMBALANCE, - ele_d, ofdm_index_old[0])); - break; - } - } - - if (is2t) { - ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, - MASKDWORD) & MASKOFDM_D; - - for (i = 0; i < OFDM_TABLE_LENGTH; i++) { - if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) { - ofdm_index_old[1] = (u8) i; - - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, - DBG_LOUD, - ("Initial pathB ele_d reg0x%x = " - "0x%lx, ofdm_index=0x%x\n", - ROFDM0_XBTXIQIMBALANCE, ele_d, - ofdm_index_old[1])); - break; - } - } - } - - temp_cck = - rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK; - - for (i = 0; i < CCK_TABLE_LENGTH; i++) { - if (rtlpriv->dm.b_cck_inch14) { - if (memcmp((void *)&temp_cck, - (void *)&cckswing_table_ch14[i][2], - 4) == 0) { - cck_index_old = (u8) i; - - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, - DBG_LOUD, - ("Initial reg0x%x = 0x%lx, " - "cck_index=0x%x, ch 14 %d\n", - RCCK0_TXFILTER2, temp_cck, - cck_index_old, - rtlpriv->dm.b_cck_inch14)); - break; - } - } else { - if (memcmp((void *)&temp_cck, - (void *) - &cckswing_table_ch1ch13[i][2], - 4) == 0) { - cck_index_old = (u8) i; - - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, - DBG_LOUD, - ("Initial reg0x%x = 0x%lx, " - "cck_index=0x%x, ch14 %d\n", - RCCK0_TXFILTER2, temp_cck, - cck_index_old, - rtlpriv->dm.b_cck_inch14)); - break; - } - } - } - - if (!rtlpriv->dm.thermalvalue) { - rtlpriv->dm.thermalvalue = - rtlefuse->eeprom_thermalmeter; - rtlpriv->dm.thermalvalue_lck = thermalvalue; - rtlpriv->dm.thermalvalue_iqk = thermalvalue; - for (i = 0; i < rf; i++) - rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i]; - rtlpriv->dm.cck_index = cck_index_old; - } - - delta = (thermalvalue > rtlpriv->dm.thermalvalue) ? - (thermalvalue - rtlpriv->dm.thermalvalue) : - (rtlpriv->dm.thermalvalue - thermalvalue); - - delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ? - (thermalvalue - rtlpriv->dm.thermalvalue_lck) : - (rtlpriv->dm.thermalvalue_lck - thermalvalue); - - delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ? 
- (thermalvalue - rtlpriv->dm.thermalvalue_iqk) : - (rtlpriv->dm.thermalvalue_iqk - thermalvalue); - - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x " - "eeprom_thermalmeter 0x%x delta 0x%x " - "delta_lck 0x%x delta_iqk 0x%x\n", - thermalvalue, rtlpriv->dm.thermalvalue, - rtlefuse->eeprom_thermalmeter, delta, delta_lck, - delta_iqk)); - - if (delta_lck > 1) { - rtlpriv->dm.thermalvalue_lck = thermalvalue; - rtl92c_phy_lc_calibrate(hw); - } - - if (delta > 0 && rtlpriv->dm.txpower_track_control) { - if (thermalvalue > rtlpriv->dm.thermalvalue) { - for (i = 0; i < rf; i++) - rtlpriv->dm.ofdm_index[i] -= delta; - rtlpriv->dm.cck_index -= delta; - } else { - for (i = 0; i < rf; i++) - rtlpriv->dm.ofdm_index[i] += delta; - rtlpriv->dm.cck_index += delta; - } - - if (is2t) { - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("temp OFDM_A_index=0x%x, " - "OFDM_B_index=0x%x," - "cck_index=0x%x\n", - rtlpriv->dm.ofdm_index[0], - rtlpriv->dm.ofdm_index[1], - rtlpriv->dm.cck_index)); - } else { - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("temp OFDM_A_index=0x%x," - "cck_index=0x%x\n", - rtlpriv->dm.ofdm_index[0], - rtlpriv->dm.cck_index)); - } - - if (thermalvalue > rtlefuse->eeprom_thermalmeter) { - for (i = 0; i < rf; i++) - ofdm_index[i] = - rtlpriv->dm.ofdm_index[i] - + 1; - cck_index = rtlpriv->dm.cck_index + 1; - } else { - for (i = 0; i < rf; i++) - ofdm_index[i] = - rtlpriv->dm.ofdm_index[i]; - cck_index = rtlpriv->dm.cck_index; - } - - for (i = 0; i < rf; i++) { - if (txpwr_level[i] >= 0 && - txpwr_level[i] <= 26) { - if (thermalvalue > - rtlefuse->eeprom_thermalmeter) { - if (delta < 5) - ofdm_index[i] -= 1; - - else - ofdm_index[i] -= 2; - } else if (delta > 5 && thermalvalue < - rtlefuse-> - eeprom_thermalmeter) { - ofdm_index[i] += 1; - } - } else if (txpwr_level[i] >= 27 && - txpwr_level[i] <= 32 - && thermalvalue > - rtlefuse->eeprom_thermalmeter) { - if (delta < 5) - ofdm_index[i] -= 1; - - else - ofdm_index[i] -= 2; - } else if (txpwr_level[i] >= 32 && - txpwr_level[i] <= 38 && - thermalvalue > - rtlefuse->eeprom_thermalmeter - && delta > 5) { - ofdm_index[i] -= 1; - } - } - - if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) { - if (thermalvalue > - rtlefuse->eeprom_thermalmeter) { - if (delta < 5) - cck_index -= 1; - - else - cck_index -= 2; - } else if (delta > 5 && thermalvalue < - rtlefuse->eeprom_thermalmeter) { - cck_index += 1; - } - } else if (txpwr_level[i] >= 27 && - txpwr_level[i] <= 32 && - thermalvalue > - rtlefuse->eeprom_thermalmeter) { - if (delta < 5) - cck_index -= 1; - - else - cck_index -= 2; - } else if (txpwr_level[i] >= 32 && - txpwr_level[i] <= 38 && - thermalvalue > rtlefuse->eeprom_thermalmeter - && delta > 5) { - cck_index -= 1; - } - - for (i = 0; i < rf; i++) { - if (ofdm_index[i] > OFDM_TABLE_SIZE - 1) - ofdm_index[i] = OFDM_TABLE_SIZE - 1; - - else if (ofdm_index[i] < ofdm_min_index) - ofdm_index[i] = ofdm_min_index; - } - - if (cck_index > CCK_TABLE_SIZE - 1) - cck_index = CCK_TABLE_SIZE - 1; - else if (cck_index < 0) - cck_index = 0; - - if (is2t) { - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("new OFDM_A_index=0x%x, " - "OFDM_B_index=0x%x," - "cck_index=0x%x\n", - ofdm_index[0], ofdm_index[1], - cck_index)); - } else { - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("new OFDM_A_index=0x%x," - "cck_index=0x%x\n", - ofdm_index[0], cck_index)); - } - } - - if (rtlpriv->dm.txpower_track_control && delta != 0) { - ele_d = - (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) 
>> 22; - val_x = rtlphy->reg_e94; - val_y = rtlphy->reg_e9c; - - if (val_x != 0) { - if ((val_x & 0x00000200) != 0) - val_x = val_x | 0xFFFFFC00; - ele_a = ((val_x * ele_d) >> 8) & 0x000003FF; - - if ((val_y & 0x00000200) != 0) - val_y = val_y | 0xFFFFFC00; - ele_c = ((val_y * ele_d) >> 8) & 0x000003FF; - - value32 = (ele_d << 22) | - ((ele_c & 0x3F) << 16) | ele_a; - - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, - MASKDWORD, value32); - - value32 = (ele_c & 0x000003C0) >> 6; - rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, - value32); - - value32 = ((val_x * ele_d) >> 7) & 0x01; - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(31), value32); - - value32 = ((val_y * ele_d) >> 7) & 0x01; - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(29), value32); - } else { - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, - MASKDWORD, - ofdmswing_table[ofdm_index[0]]); - - rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, - 0x00); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(31) | BIT(29), 0x00); - } - - if (!rtlpriv->dm.b_cck_inch14) { - rtl_write_byte(rtlpriv, 0xa22, - cckswing_table_ch1ch13[cck_index] - [0]); - rtl_write_byte(rtlpriv, 0xa23, - cckswing_table_ch1ch13[cck_index] - [1]); - rtl_write_byte(rtlpriv, 0xa24, - cckswing_table_ch1ch13[cck_index] - [2]); - rtl_write_byte(rtlpriv, 0xa25, - cckswing_table_ch1ch13[cck_index] - [3]); - rtl_write_byte(rtlpriv, 0xa26, - cckswing_table_ch1ch13[cck_index] - [4]); - rtl_write_byte(rtlpriv, 0xa27, - cckswing_table_ch1ch13[cck_index] - [5]); - rtl_write_byte(rtlpriv, 0xa28, - cckswing_table_ch1ch13[cck_index] - [6]); - rtl_write_byte(rtlpriv, 0xa29, - cckswing_table_ch1ch13[cck_index] - [7]); - } else { - rtl_write_byte(rtlpriv, 0xa22, - cckswing_table_ch14[cck_index] - [0]); - rtl_write_byte(rtlpriv, 0xa23, - cckswing_table_ch14[cck_index] - [1]); - rtl_write_byte(rtlpriv, 0xa24, - cckswing_table_ch14[cck_index] - [2]); - rtl_write_byte(rtlpriv, 0xa25, - cckswing_table_ch14[cck_index] - [3]); - rtl_write_byte(rtlpriv, 0xa26, - cckswing_table_ch14[cck_index] - [4]); - rtl_write_byte(rtlpriv, 0xa27, - cckswing_table_ch14[cck_index] - [5]); - rtl_write_byte(rtlpriv, 0xa28, - cckswing_table_ch14[cck_index] - [6]); - rtl_write_byte(rtlpriv, 0xa29, - cckswing_table_ch14[cck_index] - [7]); - } - - if (is2t) { - ele_d = (ofdmswing_table[ofdm_index[1]] & - 0xFFC00000) >> 22; - - val_x = rtlphy->reg_eb4; - val_y = rtlphy->reg_ebc; - - if (val_x != 0) { - if ((val_x & 0x00000200) != 0) - val_x = val_x | 0xFFFFFC00; - ele_a = ((val_x * ele_d) >> 8) & - 0x000003FF; - - if ((val_y & 0x00000200) != 0) - val_y = val_y | 0xFFFFFC00; - ele_c = ((val_y * ele_d) >> 8) & - 0x00003FF; - - value32 = (ele_d << 22) | - ((ele_c & 0x3F) << 16) | ele_a; - rtl_set_bbreg(hw, - ROFDM0_XBTXIQIMBALANCE, - MASKDWORD, value32); - - value32 = (ele_c & 0x000003C0) >> 6; - rtl_set_bbreg(hw, ROFDM0_XDTXAFE, - MASKH4BITS, value32); - - value32 = ((val_x * ele_d) >> 7) & 0x01; - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(27), value32); - - value32 = ((val_y * ele_d) >> 7) & 0x01; - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(25), value32); - } else { - rtl_set_bbreg(hw, - ROFDM0_XBTXIQIMBALANCE, - MASKDWORD, - ofdmswing_table[ofdm_index - [1]]); - rtl_set_bbreg(hw, ROFDM0_XDTXAFE, - MASKH4BITS, 0x00); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, - BIT(27) | BIT(25), 0x00); - } - - } - } - - if (delta_iqk > 3) { - rtlpriv->dm.thermalvalue_iqk = thermalvalue; - rtl92c_phy_iq_calibrate(hw, false); - } - - if (rtlpriv->dm.txpower_track_control) - rtlpriv->dm.thermalvalue = thermalvalue; - } - - 
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n")); - -} - -static void rtl92c_dm_initialize_txpower_tracking_thermalmeter( - struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->dm.btxpower_tracking = true; - rtlpriv->dm.btxpower_trackingInit = false; - - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("pMgntInfo->btxpower_tracking = %d\n", - rtlpriv->dm.btxpower_tracking)); -} - -static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw) -{ - rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw); -} - -static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw) -{ - rtl92c_dm_txpower_tracking_callback_thermalmeter(hw); -} - -static void rtl92c_dm_check_txpower_tracking_thermal_meter( - struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - static u8 tm_trigger; - - if (!rtlpriv->dm.btxpower_tracking) - return; - - if (!tm_trigger) { - rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK, - 0x60); - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("Trigger 92S Thermal Meter!!\n")); - tm_trigger = 1; - return; - } else { - RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, - ("Schedule TxPowerTracking direct call!!\n")); - rtl92c_dm_txpower_tracking_directcall(hw); - tm_trigger = 0; - } -} - -void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw) -{ - rtl92c_dm_check_txpower_tracking_thermal_meter(hw); -} - -void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rate_adaptive *p_ra = &(rtlpriv->ra); - - p_ra->ratr_state = DM_RATR_STA_INIT; - p_ra->pre_ratr_state = DM_RATR_STA_INIT; - - if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) - rtlpriv->dm.b_useramask = true; - else - rtlpriv->dm.b_useramask = false; - -} - -static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - struct rate_adaptive *p_ra = &(rtlpriv->ra); - u32 low_rssithresh_for_ra, high_rssithresh_for_ra; - - if (is_hal_stop(rtlhal)) { - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - ("<---- driver is going to unload\n")); - return; - } - - if (!rtlpriv->dm.b_useramask) { - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - ("<---- driver does not control rate adaptive mask\n")); - return; - } - - if (mac->link_state == MAC80211_LINKED) { - - switch (p_ra->pre_ratr_state) { - case DM_RATR_STA_HIGH: - high_rssithresh_for_ra = 50; - low_rssithresh_for_ra = 20; - break; - case DM_RATR_STA_MIDDLE: - high_rssithresh_for_ra = 55; - low_rssithresh_for_ra = 20; - break; - case DM_RATR_STA_LOW: - high_rssithresh_for_ra = 50; - low_rssithresh_for_ra = 25; - break; - default: - high_rssithresh_for_ra = 50; - low_rssithresh_for_ra = 20; - break; - } - - if (rtlpriv->dm.undecorated_smoothed_pwdb > - (long)high_rssithresh_for_ra) - p_ra->ratr_state = DM_RATR_STA_HIGH; - else if (rtlpriv->dm.undecorated_smoothed_pwdb > - (long)low_rssithresh_for_ra) - p_ra->ratr_state = DM_RATR_STA_MIDDLE; - else - p_ra->ratr_state = DM_RATR_STA_LOW; - - if (p_ra->pre_ratr_state != p_ra->ratr_state) { - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - ("RSSI = %ld\n", - rtlpriv->dm.undecorated_smoothed_pwdb)); - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - ("RSSI_LEVEL = %d\n", p_ra->ratr_state)); - RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD, - ("PreState = %d, CurState = %d\n", - p_ra->pre_ratr_state, p_ra->ratr_state)); - - 
rtlpriv->cfg->ops->update_rate_mask(hw, - p_ra->ratr_state); - - p_ra->pre_ratr_state = p_ra->ratr_state; - } - } -} - -static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) -{ - dm_pstable.pre_ccastate = CCA_MAX; - dm_pstable.cur_ccasate = CCA_MAX; - dm_pstable.pre_rfstate = RF_MAX; - dm_pstable.cur_rfstate = RF_MAX; - dm_pstable.rssi_val_min = 0; -} - -static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - if (dm_pstable.rssi_val_min != 0) { - if (dm_pstable.pre_ccastate == CCA_2R) { - if (dm_pstable.rssi_val_min >= 35) - dm_pstable.cur_ccasate = CCA_1R; - else - dm_pstable.cur_ccasate = CCA_2R; - } else { - if (dm_pstable.rssi_val_min <= 30) - dm_pstable.cur_ccasate = CCA_2R; - else - dm_pstable.cur_ccasate = CCA_1R; - } - } else { - dm_pstable.cur_ccasate = CCA_MAX; - } - - if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) { - if (dm_pstable.cur_ccasate == CCA_1R) { - if (get_rf_type(rtlphy) == RF_2T2R) { - rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, - MASKBYTE0, 0x13); - rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20); - } else { - rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, - MASKBYTE0, 0x23); - rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c); - } - } else { - rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, - 0x33); - rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63); - } - dm_pstable.pre_ccastate = dm_pstable.cur_ccasate; - } - - RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n", - (dm_pstable.cur_ccasate == - 0) ? "1RCCA" : "2RCCA")); -} - -void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) -{ - static u8 initialize; - static u32 reg_874, reg_c70, reg_85c, reg_a74; - - if (initialize == 0) { - reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, - MASKDWORD) & 0x1CC000) >> 14; - - reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1, - MASKDWORD) & BIT(3)) >> 3; - - reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, - MASKDWORD) & 0xFF000000) >> 24; - - reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12; - - initialize = 1; - } - - if (!bforce_in_normal) { - if (dm_pstable.rssi_val_min != 0) { - if (dm_pstable.pre_rfstate == RF_NORMAL) { - if (dm_pstable.rssi_val_min >= 30) - dm_pstable.cur_rfstate = RF_SAVE; - else - dm_pstable.cur_rfstate = RF_NORMAL; - } else { - if (dm_pstable.rssi_val_min <= 25) - dm_pstable.cur_rfstate = RF_NORMAL; - else - dm_pstable.cur_rfstate = RF_SAVE; - } - } else { - dm_pstable.cur_rfstate = RF_MAX; - } - } else { - dm_pstable.cur_rfstate = RF_NORMAL; - } - - if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) { - if (dm_pstable.cur_rfstate == RF_SAVE) { - rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, - 0x1C0000, 0x2); - rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0); - rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, - 0xFF000000, 0x63); - rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, - 0xC000, 0x2); - rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3); - rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); - rtl_set_bbreg(hw, 0x818, BIT(28), 0x1); - } else { - rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, - 0x1CC000, reg_874); - rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), - reg_c70); - rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000, - reg_85c); - rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74); - rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); - } - - dm_pstable.pre_rfstate = dm_pstable.cur_rfstate; - } -} - -static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_mac *mac = 
rtl_mac(rtl_priv(hw)); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - if (((mac->link_state == MAC80211_NOLINK)) && - (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { - dm_pstable.rssi_val_min = 0; - RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, - ("Not connected to any\n")); - } - - if (mac->link_state == MAC80211_LINKED) { - if (mac->opmode == NL80211_IFTYPE_ADHOC) { - dm_pstable.rssi_val_min = - rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; - RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, - ("AP Client PWDB = 0x%lx\n", - dm_pstable.rssi_val_min)); - } else { - dm_pstable.rssi_val_min = - rtlpriv->dm.undecorated_smoothed_pwdb; - RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, - ("STA Default Port PWDB = 0x%lx\n", - dm_pstable.rssi_val_min)); - } - } else { - dm_pstable.rssi_val_min = - rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; - - RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, - ("AP Ext Port PWDB = 0x%lx\n", - dm_pstable.rssi_val_min)); - } - - if (IS_92C_SERIAL(rtlhal->version)) - rtl92c_dm_1r_cca(hw); -} - -void rtl92c_dm_init(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; - rtl92c_dm_diginit(hw); - rtl92c_dm_init_dynamic_txpower(hw); - rtl92c_dm_init_edca_turbo(hw); - rtl92c_dm_init_rate_adaptive_mask(hw); - rtl92c_dm_initialize_txpower_tracking(hw); - rtl92c_dm_init_dynamic_bb_powersaving(hw); -} - -void rtl92c_dm_watchdog(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - bool b_fw_current_inpsmode = false; - bool b_fw_ps_awake = true; - - rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS, - (u8 *) (&b_fw_current_inpsmode)); - rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON, - (u8 *) (&b_fw_ps_awake)); - - if ((ppsc->rfpwr_state == ERFON) && ((!b_fw_current_inpsmode) && - b_fw_ps_awake) - && (!ppsc->rfchange_inprogress)) { - rtl92c_dm_pwdb_monitor(hw); - rtl92c_dm_dig(hw); - rtl92c_dm_false_alarm_counter_statistics(hw); - rtl92c_dm_dynamic_bb_powersaving(hw); - rtl92c_dm_dynamic_txpower(hw); - rtl92c_dm_check_txpower_tracking(hw); - rtl92c_dm_refresh_rate_adaptive_mask(hw); - rtl92c_dm_check_edca_turbo(hw); - } -} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h index 463439e4074c..36302ebae4a3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h @@ -192,5 +192,6 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw); void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw); void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); +void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index 1c41a0c93506..05477f465a75 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -37,7 +37,6 @@ #include "def.h" #include "phy.h" #include "dm.h" -#include "fw.h" #include "led.h" #include "hw.h" @@ -124,7 +123,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; } case HW_VAR_FW_PSMODE_STATUS: - *((bool *) (val)) = ppsc->b_fw_current_inpsmode; + *((bool *) (val)) = ppsc->fw_current_inpsmode; break; case HW_VAR_CORRECT_TSF:{ u64 tsf; @@ -173,15 +172,15 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; } case HW_VAR_BASIC_RATE:{ - u16 
b_rate_cfg = ((u16 *) val)[0]; + u16 rate_cfg = ((u16 *) val)[0]; u8 rate_index = 0; - b_rate_cfg = b_rate_cfg & 0x15f; - b_rate_cfg |= 0x01; - rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff); + rate_cfg &= 0x15f; + rate_cfg |= 0x01; + rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); rtl_write_byte(rtlpriv, REG_RRSR + 1, - (b_rate_cfg >> 8)&0xff); - while (b_rate_cfg > 0x1) { - b_rate_cfg = (b_rate_cfg >> 1); + (rate_cfg >> 8)&0xff); + while (rate_cfg > 0x1) { + rate_cfg = (rate_cfg >> 1); rate_index++; } rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, @@ -318,15 +317,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } case HW_VAR_AC_PARAM:{ u8 e_aci = *((u8 *) val); - u32 u4b_ac_param = 0; + u32 u4b_ac_param; + u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min); + u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max); + u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op); - u4b_ac_param |= (u32) mac->ac[e_aci].aifs; - u4b_ac_param |= ((u32) mac->ac[e_aci].cw_min + u4b_ac_param = (u32) mac->ac[e_aci].aifs; + u4b_ac_param |= ((u32)cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET; - u4b_ac_param |= ((u32) mac->ac[e_aci].cw_max & + u4b_ac_param |= ((u32)cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET; - u4b_ac_param |= (u32) mac->ac[e_aci].tx_op - << AC_PARAM_TXOP_LIMIT_OFFSET; + u4b_ac_param |= (u32)tx_op << AC_PARAM_TXOP_OFFSET; RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, ("queue:%x, ac_param:%x\n", e_aci, @@ -469,12 +470,12 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) break; } case HW_VAR_FW_PSMODE_STATUS: - ppsc->b_fw_current_inpsmode = *((bool *) val); + ppsc->fw_current_inpsmode = *((bool *) val); break; case HW_VAR_H2C_FW_JOINBSSRPT:{ u8 mstatus = (*(u8 *) val); u8 tmp_regcr, tmp_reg422; - bool b_recover = false; + bool recover = false; if (mstatus == RT_MEDIA_CONNECT) { rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID, @@ -491,7 +492,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); if (tmp_reg422 & BIT(6)) - b_recover = true; + recover = true; rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422 & (~BIT(6))); @@ -500,7 +501,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); - if (b_recover) { + if (recover) { rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp_reg422); @@ -868,7 +869,7 @@ static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw) rtl_write_word(rtlpriv, 0x350, 0x870c); rtl_write_byte(rtlpriv, 0x352, 0x1); - if (ppsc->b_support_backdoor) + if (ppsc->support_backdoor) rtl_write_byte(rtlpriv, 0x349, 0x1b); else rtl_write_byte(rtlpriv, 0x349, 0x03); @@ -940,15 +941,15 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw) ("Failed to download FW. 
Init HW " "without FW now..\n")); err = 1; - rtlhal->bfw_ready = false; + rtlhal->fw_ready = false; return err; } else { - rtlhal->bfw_ready = true; + rtlhal->fw_ready = true; } rtlhal->last_hmeboxnum = 0; - rtl92c_phy_mac_config(hw); - rtl92c_phy_bb_config(hw); + rtl92ce_phy_mac_config(hw); + rtl92ce_phy_bb_config(hw); rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; rtl92c_phy_rf_config(hw); rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, @@ -1170,21 +1171,20 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u32 u4b_ac_param; + u16 cw_min = le16_to_cpu(mac->ac[aci].cw_min); + u16 cw_max = le16_to_cpu(mac->ac[aci].cw_max); + u16 tx_op = le16_to_cpu(mac->ac[aci].tx_op); rtl92c_dm_init_edca_turbo(hw); - u4b_ac_param = (u32) mac->ac[aci].aifs; - u4b_ac_param |= - ((u32) mac->ac[aci].cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET; - u4b_ac_param |= - ((u32) mac->ac[aci].cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET; - u4b_ac_param |= (u32) mac->ac[aci].tx_op << AC_PARAM_TXOP_LIMIT_OFFSET; + u4b_ac_param |= (u32) ((cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET); + u4b_ac_param |= (u32) ((cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET); + u4b_ac_param |= (u32) (tx_op << AC_PARAM_TXOP_OFFSET); RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG, ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n", - aci, u4b_ac_param, mac->ac[aci].aifs, mac->ac[aci].cw_min, - mac->ac[aci].cw_max, mac->ac[aci].tx_op)); + aci, u4b_ac_param, mac->ac[aci].aifs, cw_min, + cw_max, tx_op)); switch (aci) { case AC1_BK: rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param); @@ -1237,7 +1237,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0); - if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->bfw_ready) + if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->fw_ready) rtl92c_firmware_selfreset(hw); rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51); rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); @@ -1335,19 +1335,6 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw, rtl92ce_enable_interrupt(hw); } -static u8 _rtl92c_get_chnl_group(u8 chnl) -{ - u8 group; - - if (chnl < 3) - group = 0; - else if (chnl < 9) - group = 1; - else - group = 2; - return group; -} - static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, bool autoload_fail, u8 *hwinfo) @@ -1568,7 +1555,7 @@ static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, rtlefuse->eeprom_thermalmeter = (tempval & 0x1f); if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail) - rtlefuse->b_apk_thermalmeterignore = true; + rtlefuse->apk_thermalmeterignore = true; rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; RTPRINT(rtlpriv, FINIT, INIT_TxPower, @@ -1625,7 +1612,7 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw) rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; - rtlefuse->b_txpwr_fromeprom = true; + rtlefuse->txpwr_fromeprom = true; rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, @@ -1668,7 +1655,7 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw) switch (rtlhal->oem_id) { case RT_CID_819x_HP: - pcipriv->ledctl.bled_opendrain = true; + pcipriv->ledctl.led_opendrain = true; break; case 
RT_CID_819x_Lenovo: case RT_CID_DEFAULT: @@ -1693,10 +1680,10 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw) rtlhal->version = _rtl92ce_read_chip_version(hw); if (get_rf_type(rtlphy) == RF_1T1R) - rtlpriv->dm.brfpath_rxenable[0] = true; + rtlpriv->dm.rfpath_rxenable[0] = true; else - rtlpriv->dm.brfpath_rxenable[0] = - rtlpriv->dm.brfpath_rxenable[1] = true; + rtlpriv->dm.rfpath_rxenable[0] = + rtlpriv->dm.rfpath_rxenable[1] = true; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n", rtlhal->version)); tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); @@ -1725,18 +1712,18 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw) struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); u32 ratr_value = (u32) mac->basic_rates; - u8 *p_mcsrate = mac->mcs; + u8 *mcsrate = mac->mcs; u8 ratr_index = 0; - u8 b_nmode = mac->ht_enable; + u8 nmode = mac->ht_enable; u8 mimo_ps = 1; u16 shortgi_rate; u32 tmp_ratr_value; - u8 b_curtxbw_40mhz = mac->bw_40; - u8 b_curshortgi_40mhz = mac->sgi_40; - u8 b_curshortgi_20mhz = mac->sgi_20; + u8 curtxbw_40mhz = mac->bw_40; + u8 curshortgi_40mhz = mac->sgi_40; + u8 curshortgi_20mhz = mac->sgi_20; enum wireless_mode wirelessmode = mac->mode; - ratr_value |= EF2BYTE((*(u16 *) (p_mcsrate))) << 12; + ratr_value |= ((*(u16 *) (mcsrate))) << 12; switch (wirelessmode) { case WIRELESS_MODE_B: @@ -1750,7 +1737,7 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw) break; case WIRELESS_MODE_N_24G: case WIRELESS_MODE_N_5G: - b_nmode = 1; + nmode = 1; if (mimo_ps == 0) { ratr_value &= 0x0007F005; } else { @@ -1776,9 +1763,8 @@ void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw) ratr_value &= 0x0FFFFFFF; - if (b_nmode && ((b_curtxbw_40mhz && - b_curshortgi_40mhz) || (!b_curtxbw_40mhz && - b_curshortgi_20mhz))) { + if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || (!curtxbw_40mhz && + curshortgi_20mhz))) { ratr_value |= 0x10000000; tmp_ratr_value = (ratr_value >> 12); @@ -1806,11 +1792,11 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) u32 ratr_bitmap = (u32) mac->basic_rates; u8 *p_mcsrate = mac->mcs; u8 ratr_index; - u8 b_curtxbw_40mhz = mac->bw_40; - u8 b_curshortgi_40mhz = mac->sgi_40; - u8 b_curshortgi_20mhz = mac->sgi_20; + u8 curtxbw_40mhz = mac->bw_40; + u8 curshortgi_40mhz = mac->sgi_40; + u8 curshortgi_20mhz = mac->sgi_20; enum wireless_mode wirelessmode = mac->mode; - bool b_shortgi = false; + bool shortgi = false; u8 rate_mask[5]; u8 macid = 0; u8 mimops = 1; @@ -1852,7 +1838,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) } else { if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) { - if (b_curtxbw_40mhz) { + if (curtxbw_40mhz) { if (rssi_level == 1) ratr_bitmap &= 0x000f0000; else if (rssi_level == 2) @@ -1868,7 +1854,7 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) ratr_bitmap &= 0x000ff005; } } else { - if (b_curtxbw_40mhz) { + if (curtxbw_40mhz) { if (rssi_level == 1) ratr_bitmap &= 0x0f0f0000; else if (rssi_level == 2) @@ -1886,13 +1872,13 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) } } - if ((b_curtxbw_40mhz && b_curshortgi_40mhz) || - (!b_curtxbw_40mhz && b_curshortgi_20mhz)) { + if ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz)) { if (macid == 0) - b_shortgi = true; + shortgi = true; else if (macid == 1) - b_shortgi = false; + shortgi = false; } break; default: @@ -1906,9 +1892,9 @@ void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) } 
RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n", ratr_bitmap)); - *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) | - (ratr_index << 28)); - rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80; + *(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) | + (ratr_index << 28); + rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80; RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, " "ratr_val:%x, %x:%x:%x:%x:%x\n", ratr_index, ratr_bitmap, @@ -1940,13 +1926,13 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; u8 u1tmp; - bool b_actuallyset = false; + bool actuallyset = false; unsigned long flag; if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter)) return false; - if (ppsc->b_swrf_processing) + if (ppsc->swrf_processing) return false; spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); @@ -1972,24 +1958,24 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL); e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF; - if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) { + if ((ppsc->hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, ("GPIOChangeRF - HW Radio ON, RF ON\n")); e_rfpowerstate_toset = ERFON; - ppsc->b_hwradiooff = false; - b_actuallyset = true; - } else if ((ppsc->b_hwradiooff == false) + ppsc->hwradiooff = false; + actuallyset = true; + } else if ((ppsc->hwradiooff == false) && (e_rfpowerstate_toset == ERFOFF)) { RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, ("GPIOChangeRF - HW Radio OFF, RF OFF\n")); e_rfpowerstate_toset = ERFOFF; - ppsc->b_hwradiooff = true; - b_actuallyset = true; + ppsc->hwradiooff = true; + actuallyset = true; } - if (b_actuallyset) { + if (actuallyset) { if (e_rfpowerstate_toset == ERFON) { if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) { @@ -2028,7 +2014,7 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) } *valid = 1; - return !ppsc->b_hwradiooff; + return !ppsc->hwradiooff; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h index 305c819c8c78..a3dfdb635168 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h @@ -30,6 +30,8 @@ #ifndef __RTL92CE_HW_H__ #define __RTL92CE_HW_H__ +#define H2C_RA_MASK 6 + void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw); void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw, @@ -53,5 +55,14 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw); void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr, bool is_group, u8 enc_algo, bool is_wepkey, bool clear_all); +bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); +void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); +void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); +int rtl92c_download_fw(struct ieee80211_hw *hw); +void rtl92c_firmware_selfreset(struct ieee80211_hw *hw); +void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); +bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw); #endif diff --git 
a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c index 78a0569208ea..7b1da8d7508f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c @@ -57,7 +57,7 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) ("switch case not process\n")); break; } - pled->b_ledon = true; + pled->ledon = true; } void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) @@ -76,7 +76,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) break; case LED_PIN_LED0: ledcfg &= 0xf0; - if (pcipriv->ledctl.bled_opendrain == true) + if (pcipriv->ledctl.led_opendrain == true) rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(1) | BIT(5) | BIT(6))); else @@ -92,7 +92,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) ("switch case not process\n")); break; } - pled->b_ledon = false; + pled->ledon = false; } void rtl92ce_init_sw_leds(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c index 45044117139a..d0541e8c6012 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c @@ -32,89 +32,13 @@ #include "../ps.h" #include "reg.h" #include "def.h" +#include "hw.h" #include "phy.h" #include "rf.h" #include "dm.h" #include "table.h" -static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset); -static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset, - u32 data); -static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset); -static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset, - u32 data); -static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask); -static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw); -static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); -static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, - u8 configtype); -static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, - u8 configtype); -static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); -static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, - u32 cmdtableidx, u32 cmdtablesz, - enum swchnlcmd_id cmdid, u32 para1, - u32 para2, u32 msdelay); -static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, - u8 channel, u8 *stage, u8 *step, - u32 *delay); -static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, - long power_indbm); -static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw, - enum radio_path rfpath); -static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, - u8 txpwridx); -u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 returnvalue, originalvalue, bitshift; - - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), " - "bitmask(%#x)\n", regaddr, - bitmask)); - originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); - returnvalue = (originalvalue & bitmask) >> bitshift; - - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x " - "Addr[0x%x]=0x%x\n", bitmask, - regaddr, originalvalue)); - - return 
returnvalue; - -} - -void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, - u32 regaddr, u32 bitmask, u32 data) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 originalvalue, bitshift; - - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)," - " data(%#x)\n", regaddr, bitmask, - data)); - - if (bitmask != MASKDWORD) { - originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); - data = ((originalvalue & (~bitmask)) | (data << bitshift)); - } - - rtl_write_dword(rtlpriv, regaddr, data); - - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x)," - " data(%#x)\n", regaddr, bitmask, - data)); - -} - -u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, +u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -149,7 +73,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, return readback_value; } -void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, +void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask, u32 data) { @@ -197,137 +121,25 @@ void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, bitmask, data, rfpath)); } -static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset) -{ - RT_ASSERT(false, ("deprecated!\n")); - return 0; -} - -static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset, - u32 data) -{ - RT_ASSERT(false, ("deprecated!\n")); -} - -static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; - u32 newoffset; - u32 tmplong, tmplong2; - u8 rfpi_enable = 0; - u32 retvalue; - - offset &= 0x3f; - newoffset = offset; - if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n")); - return 0xFFFFFFFF; - } - tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD); - if (rfpath == RF90_PATH_A) - tmplong2 = tmplong; - else - tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD); - tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) | - (newoffset << 23) | BLSSIREADEDGE; - rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, - tmplong & (~BLSSIREADEDGE)); - mdelay(1); - rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2); - mdelay(1); - rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD, - tmplong | BLSSIREADEDGE); - mdelay(1); - if (rfpath == RF90_PATH_A) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, - BIT(8)); - else if (rfpath == RF90_PATH_B) - rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, - BIT(8)); - if (rfpi_enable) - retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi, - BLSSIREADBACKDATA); - else - retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback, - BLSSIREADBACKDATA); - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n", - rfpath, pphyreg->rflssi_readback, - retvalue)); - return retvalue; -} - -static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, - enum radio_path rfpath, u32 offset, - u32 data) -{ - u32 data_and_addr; - u32 newoffset; - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath]; - - if (RT_CANNOT_IO(hw)) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 
("stop\n")); - return; - } - offset &= 0x3f; - newoffset = offset; - data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff; - rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr); - RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n", - rfpath, pphyreg->rf3wire_offset, - data_and_addr)); -} - -static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask) -{ - u32 i; - - for (i = 0; i <= 31; i++) { - if (((bitmask >> i) & 0x1) == 1) - break; - } - return i; -} - -static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw) -{ - rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2); - rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022); - rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45); - rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23); - rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1); - rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2); - rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2); - rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2); - rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2); - rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2); -} - -bool rtl92c_phy_mac_config(struct ieee80211_hw *hw) +bool rtl92ce_phy_mac_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); bool is92c = IS_92C_SERIAL(rtlhal->version); - bool rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw); + bool rtstatus = _rtl92ce_phy_config_mac_with_headerfile(hw); if (is92c) rtl_write_byte(rtlpriv, 0x14, 0x71); return rtstatus; } -bool rtl92c_phy_bb_config(struct ieee80211_hw *hw) +bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw) { bool rtstatus = true; struct rtl_priv *rtlpriv = rtl_priv(hw); u16 regval; u32 regvaldw; - u8 b_reg_hwparafile = 1; + u8 reg_hwparafile = 1; _rtl92c_phy_init_bb_rf_register_definition(hw); regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); @@ -342,56 +154,12 @@ bool rtl92c_phy_bb_config(struct ieee80211_hw *hw) rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0); rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23)); - if (b_reg_hwparafile == 1) + if (reg_hwparafile == 1) rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw); return rtstatus; } -bool rtl92c_phy_rf_config(struct ieee80211_hw *hw) -{ - return rtl92c_phy_rf6052_config(hw); -} - -static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - bool rtstatus; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n")); - rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw, - BASEBAND_CONFIG_PHY_REG); - if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!")); - return false; - } - if (rtlphy->rf_type == RF_1T2R) { - _rtl92c_phy_bb_config_1t(hw); - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n")); - } - if (rtlefuse->autoload_failflag == false) { - rtlphy->pwrgroup_cnt = 0; - rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw, - BASEBAND_CONFIG_PHY_REG); - } - if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!")); - return false; - } - rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw, - BASEBAND_CONFIG_AGC_TAB); - if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n")); - return false; - } - rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER2, - 0x200)); - return true; -} - -static bool 
_rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u32 i; @@ -408,11 +176,7 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) return true; } -void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw) -{ -} - -static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, +bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, u8 configtype) { int i; @@ -456,7 +220,6 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, phy_regarray_table[i], phy_regarray_table[i + 1])); } - rtl92c_phy_config_bb_external_pa(hw); } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { for (i = 0; i < agctab_arraylen; i = i + 2) { rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD, @@ -472,175 +235,7 @@ static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, return true; } -static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, - u32 regaddr, u32 bitmask, - u32 data) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - if (regaddr == RTXAGC_A_RATE18_06) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][0])); - } - if (regaddr == RTXAGC_A_RATE54_24) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][1])); - } - if (regaddr == RTXAGC_A_CCK1_MCS32) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][6])); - } - if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][7])); - } - if (regaddr == RTXAGC_A_MCS03_MCS00) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][2])); - } - if (regaddr == RTXAGC_A_MCS07_MCS04) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][3])); - } - if (regaddr == RTXAGC_A_MCS11_MCS08) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][4])); - } - if (regaddr == RTXAGC_A_MCS15_MCS12) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n", - 
rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][5])); - } - if (regaddr == RTXAGC_B_RATE18_06) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] = - data; - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][8])); - } - if (regaddr == RTXAGC_B_RATE54_24) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] = - data; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][9])); - } - - if (regaddr == RTXAGC_B_CCK1_55_MCS32) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] = - data; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][14])); - } - - if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] = - data; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][15])); - } - - if (regaddr == RTXAGC_B_MCS03_MCS00) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] = - data; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][10])); - } - - if (regaddr == RTXAGC_B_MCS07_MCS04) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] = - data; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][11])); - } - - if (regaddr == RTXAGC_B_MCS11_MCS08) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] = - data; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][12])); - } - - if (regaddr == RTXAGC_B_MCS15_MCS12) { - rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] = - data; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n", - rtlphy->pwrgroup_cnt, - rtlphy->mcs_txpwrlevel_origoffset[rtlphy-> - pwrgroup_cnt][13])); - - rtlphy->pwrgroup_cnt++; - } -} - -static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, +bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, u8 configtype) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -679,13 +274,7 @@ static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, return true; } -static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw, - enum radio_path rfpath) -{ - return true; -} - -bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, +bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, enum radio_path rfpath) { @@ -740,7 +329,6 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, udelay(1); } } - _rtl92c_phy_config_rf_external_pa(hw, rfpath); break; case RF90_PATH_B: for (i = 0; i < radiob_arraylen; i = i + 2) { @@ -776,346 +364,7 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, return true; } -void 
rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - rtlphy->default_initialgain[0] = - (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0); - rtlphy->default_initialgain[1] = - (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0); - rtlphy->default_initialgain[2] = - (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0); - rtlphy->default_initialgain[3] = - (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("Default initial gain (c50=0x%x, " - "c58=0x%x, c60=0x%x, c68=0x%x\n", - rtlphy->default_initialgain[0], - rtlphy->default_initialgain[1], - rtlphy->default_initialgain[2], - rtlphy->default_initialgain[3])); - - rtlphy->framesync = (u8) rtl_get_bbreg(hw, - ROFDM0_RXDETECTOR3, MASKBYTE0); - rtlphy->framesync_c34 = rtl_get_bbreg(hw, - ROFDM0_RXDETECTOR2, MASKDWORD); - - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - ("Default framesync (0x%x) = 0x%x\n", - ROFDM0_RXDETECTOR3, rtlphy->framesync)); -} - -static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW; - rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW; - rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW; - rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW; - - rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB; - rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB; - rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB; - rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB; - - rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE; - rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE; - - rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE; - rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE; - - rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset = - RFPGA0_XA_LSSIPARAMETER; - rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset = - RFPGA0_XB_LSSIPARAMETER; - - rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER; - rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER; - - rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE; - rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE; - - rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1; - rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1; - - rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2; - rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2; - - rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control = - RFPGA0_XAB_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control = - RFPGA0_XAB_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control = - RFPGA0_XCD_SWITCHCONTROL; - rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control = - RFPGA0_XCD_SWITCHCONTROL; - - rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1; - 
rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1; - rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1; - - rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2; - rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2; - - rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance = - ROFDM0_XARXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance = - ROFDM0_XBRXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance = - ROFDM0_XCRXIQIMBANLANCE; - rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance = - ROFDM0_XDRXIQIMBALANCE; - - rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE; - rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE; - rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE; - rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE; - - rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance = - ROFDM0_XATXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance = - ROFDM0_XBTXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance = - ROFDM0_XCTXIQIMBALANCE; - rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance = - ROFDM0_XDTXIQIMBALANCE; - - rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE; - rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE; - rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE; - rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE; - - rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback = - RFPGA0_XA_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback = - RFPGA0_XB_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback = - RFPGA0_XC_LSSIREADBACK; - rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback = - RFPGA0_XD_LSSIREADBACK; - - rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi = - TRANSCEIVEA_HSPI_READBACK; - rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi = - TRANSCEIVEB_HSPI_READBACK; - -} - -void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 txpwr_level; - long txpwr_dbm; - - txpwr_level = rtlphy->cur_cck_txpwridx; - txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw, - WIRELESS_MODE_B, txpwr_level); - txpwr_level = rtlphy->cur_ofdm24g_txpwridx + - rtlefuse->legacy_ht_txpowerdiff; - if (_rtl92c_phy_txpwr_idx_to_dbm(hw, - WIRELESS_MODE_G, - txpwr_level) > txpwr_dbm) - txpwr_dbm = - _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G, - txpwr_level); - txpwr_level = rtlphy->cur_ofdm24g_txpwridx; - if (_rtl92c_phy_txpwr_idx_to_dbm(hw, - WIRELESS_MODE_N_24G, - txpwr_level) > txpwr_dbm) - txpwr_dbm = - _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G, - txpwr_level); - *powerlevel = txpwr_dbm; -} - -static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel, - u8 *cckpowerlevel, u8 *ofdmpowerlevel) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 index = (channel - 1); - - cckpowerlevel[RF90_PATH_A] = - rtlefuse->txpwrlevel_cck[RF90_PATH_A][index]; - cckpowerlevel[RF90_PATH_B] = - rtlefuse->txpwrlevel_cck[RF90_PATH_B][index]; - if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) { - ofdmpowerlevel[RF90_PATH_A] = - 
rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index]; - ofdmpowerlevel[RF90_PATH_B] = - rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index]; - } else if (get_rf_type(rtlphy) == RF_2T2R) { - ofdmpowerlevel[RF90_PATH_A] = - rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index]; - ofdmpowerlevel[RF90_PATH_B] = - rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index]; - } -} - -static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw, - u8 channel, u8 *cckpowerlevel, - u8 *ofdmpowerlevel) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - rtlphy->cur_cck_txpwridx = cckpowerlevel[0]; - rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0]; -} - -void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel) -{ - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 cckpowerlevel[2], ofdmpowerlevel[2]; - - if (rtlefuse->b_txpwr_fromeprom == false) - return; - _rtl92c_get_txpower_index(hw, channel, - &cckpowerlevel[0], &ofdmpowerlevel[0]); - _rtl92c_ccxpower_index_check(hw, - channel, &cckpowerlevel[0], - &ofdmpowerlevel[0]); - rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]); - rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel); -} - -bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u8 idx; - u8 rf_path; - - u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw, - WIRELESS_MODE_B, - power_indbm); - u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw, - WIRELESS_MODE_N_24G, - power_indbm); - if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0) - ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff; - else - ofdmtxpwridx = 0; - RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE, - ("%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n", - power_indbm, ccktxpwridx, ofdmtxpwridx)); - for (idx = 0; idx < 14; idx++) { - for (rf_path = 0; rf_path < 2; rf_path++) { - rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx; - rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] = - ofdmtxpwridx; - rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] = - ofdmtxpwridx; - } - } - rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); - return true; -} - -void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval) -{ -} - -static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, - long power_indbm) -{ - u8 txpwridx; - long offset; - - switch (wirelessmode) { - case WIRELESS_MODE_B: - offset = -7; - break; - case WIRELESS_MODE_G: - case WIRELESS_MODE_N_24G: - offset = -8; - break; - default: - offset = -8; - break; - } - - if ((power_indbm - offset) > 0) - txpwridx = (u8) ((power_indbm - offset) * 2); - else - txpwridx = 0; - - if (txpwridx > MAX_TXPWR_IDX_NMODE_92S) - txpwridx = MAX_TXPWR_IDX_NMODE_92S; - - return txpwridx; -} - -static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, - enum wireless_mode wirelessmode, - u8 txpwridx) -{ - long offset; - long pwrout_dbm; - - switch (wirelessmode) { - case WIRELESS_MODE_B: - offset = -7; - break; - case WIRELESS_MODE_G: - case WIRELESS_MODE_N_24G: - offset = -8; - break; - default: - offset = -8; - break; - } - pwrout_dbm = txpwridx / 2 + offset; - return pwrout_dbm; -} - -void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - enum io_type iotype; - - if (!is_hal_stop(rtlhal)) { 
- switch (operation) { - case SCAN_OPT_BACKUP: - iotype = IO_CMD_PAUSE_DM_BY_SCAN; - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_IO_CMD, - (u8 *)&iotype); - - break; - case SCAN_OPT_RESTORE: - iotype = IO_CMD_RESUME_DM_BY_SCAN; - rtlpriv->cfg->ops->set_hw_reg(hw, - HW_VAR_IO_CMD, - (u8 *)&iotype); - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - ("Unknown Scan Backup operation.\n")); - break; - } - } -} - -void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -1183,645 +432,7 @@ void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw) RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); } -void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, - enum nl80211_channel_type ch_type) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u8 tmp_bw = rtlphy->current_chan_bw; - - if (rtlphy->set_bwmode_inprogress) - return; - rtlphy->set_bwmode_inprogress = true; - if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) - rtl92c_phy_set_bw_mode_callback(hw); - else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - ("FALSE driver sleep or unload\n")); - rtlphy->set_bwmode_inprogress = false; - rtlphy->current_chan_bw = tmp_bw; - } -} - -void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u32 delay; - - RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, - ("switch to channel%d\n", rtlphy->current_channel)); - if (is_hal_stop(rtlhal)) - return; - do { - if (!rtlphy->sw_chnl_inprogress) - break; - if (!_rtl92c_phy_sw_chnl_step_by_step - (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage, - &rtlphy->sw_chnl_step, &delay)) { - if (delay > 0) - mdelay(delay); - else - continue; - } else - rtlphy->sw_chnl_inprogress = false; - break; - } while (true); - RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); -} - -u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - if (rtlphy->sw_chnl_inprogress) - return 0; - if (rtlphy->set_bwmode_inprogress) - return 0; - RT_ASSERT((rtlphy->current_channel <= 14), - ("WIRELESS_MODE_G but channel>14")); - rtlphy->sw_chnl_inprogress = true; - rtlphy->sw_chnl_stage = 0; - rtlphy->sw_chnl_step = 0; - if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { - rtl92c_phy_sw_chnl_callback(hw); - RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - ("sw_chnl_inprogress false schdule workitem\n")); - rtlphy->sw_chnl_inprogress = false; - } else { - RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - ("sw_chnl_inprogress false driver sleep or" - " unload\n")); - rtlphy->sw_chnl_inprogress = false; - } - return 1; -} - -static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, - u8 channel, u8 *stage, u8 *step, - u32 *delay) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct swchnlcmd precommoncmd[MAX_PRECMD_CNT]; - u32 precommoncmdcnt; - struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT]; - u32 postcommoncmdcnt; - struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT]; - u32 rfdependcmdcnt; - struct swchnlcmd *currentcmd = NULL; - u8 rfpath; - u8 num_total_rfpath = rtlphy->num_total_rfpath; - - precommoncmdcnt = 0; - 
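/*
 * Editor's illustration (not part of the patch): the dBm <-> TX power index
 * mapping removed above uses a 0.5 dB step and a per-modulation offset
 * (-7 dBm for CCK/802.11b, -8 dBm for OFDM/HT).  A minimal standalone sketch
 * of that arithmetic; EXAMPLE_MAX_TXPWR_IDX is a hypothetical stand-in for
 * the driver's MAX_TXPWR_IDX_NMODE_92S clamp.
 */
#include <stdint.h>

#define EXAMPLE_MAX_TXPWR_IDX	63	/* assumption: stand-in clamp value */

static uint8_t example_dbm_to_txpwr_idx(long power_indbm, int is_cck)
{
	long offset = is_cck ? -7 : -8;		/* same offsets as the code above */
	long idx = (power_indbm - offset) * 2;	/* 0.5 dB per index step */

	if (idx < 0)
		idx = 0;
	if (idx > EXAMPLE_MAX_TXPWR_IDX)
		idx = EXAMPLE_MAX_TXPWR_IDX;
	return (uint8_t)idx;
}

static long example_txpwr_idx_to_dbm(uint8_t idx, int is_cck)
{
	long offset = is_cck ? -7 : -8;

	return idx / 2 + offset;	/* inverse mapping, truncated to 1 dBm */
}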
_rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, - MAX_PRECMD_CNT, - CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0); - _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++, - MAX_PRECMD_CNT, CMDID_END, 0, 0, 0); - - postcommoncmdcnt = 0; - - _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++, - MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0); - - rfdependcmdcnt = 0; - - RT_ASSERT((channel >= 1 && channel <= 14), - ("illegal channel for Zebra: %d\n", channel)); - - _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, - MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG, - RF_CHNLBW, channel, 10); - - _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++, - MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0, - 0); - - do { - switch (*stage) { - case 0: - currentcmd = &precommoncmd[*step]; - break; - case 1: - currentcmd = &rfdependcmd[*step]; - break; - case 2: - currentcmd = &postcommoncmd[*step]; - break; - } - - if (currentcmd->cmdid == CMDID_END) { - if ((*stage) == 2) { - return true; - } else { - (*stage)++; - (*step) = 0; - continue; - } - } - - switch (currentcmd->cmdid) { - case CMDID_SET_TXPOWEROWER_LEVEL: - rtl92c_phy_set_txpower_level(hw, channel); - break; - case CMDID_WRITEPORT_ULONG: - rtl_write_dword(rtlpriv, currentcmd->para1, - currentcmd->para2); - break; - case CMDID_WRITEPORT_USHORT: - rtl_write_word(rtlpriv, currentcmd->para1, - (u16) currentcmd->para2); - break; - case CMDID_WRITEPORT_UCHAR: - rtl_write_byte(rtlpriv, currentcmd->para1, - (u8) currentcmd->para2); - break; - case CMDID_RF_WRITEREG: - for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { - rtlphy->rfreg_chnlval[rfpath] = - ((rtlphy->rfreg_chnlval[rfpath] & - 0xfffffc00) | currentcmd->para2); - - rtl_set_rfreg(hw, (enum radio_path)rfpath, - currentcmd->para1, - RFREG_OFFSET_MASK, - rtlphy->rfreg_chnlval[rfpath]); - } - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - ("switch case not process\n")); - break; - } - - break; - } while (true); - - (*delay) = currentcmd->msdelay; - (*step)++; - return false; -} - -static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable, - u32 cmdtableidx, u32 cmdtablesz, - enum swchnlcmd_id cmdid, - u32 para1, u32 para2, u32 msdelay) -{ - struct swchnlcmd *pcmd; - - if (cmdtable == NULL) { - RT_ASSERT(false, ("cmdtable cannot be NULL.\n")); - return false; - } - - if (cmdtableidx >= cmdtablesz) - return false; - - pcmd = cmdtable + cmdtableidx; - pcmd->cmdid = cmdid; - pcmd->para1 = para1; - pcmd->para2 = para2; - pcmd->msdelay = msdelay; - return true; -} - -bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath) -{ - return true; -} - -static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb) -{ - u32 reg_eac, reg_e94, reg_e9c, reg_ea4; - u8 result = 0x00; - - rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f); - rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f); - rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102); - rtl_set_bbreg(hw, 0xe3c, MASKDWORD, - config_pathb ? 
0x28160202 : 0x28160502); - - if (config_pathb) { - rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22); - rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22); - rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102); - rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202); - } - - rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1); - rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000); - rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000); - - mdelay(IQK_DELAY_TIME); - - reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); - reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD); - reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD); - reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD); - - if (!(reg_eac & BIT(28)) && - (((reg_e94 & 0x03FF0000) >> 16) != 0x142) && - (((reg_e9c & 0x03FF0000) >> 16) != 0x42)) - result |= 0x01; - else - return result; - - if (!(reg_eac & BIT(27)) && - (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) && - (((reg_eac & 0x03FF0000) >> 16) != 0x36)) - result |= 0x02; - return result; -} - -static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw) -{ - u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc; - u8 result = 0x00; - - rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002); - rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000); - mdelay(IQK_DELAY_TIME); - reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD); - reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD); - reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD); - reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD); - reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD); - if (!(reg_eac & BIT(31)) && - (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) && - (((reg_ebc & 0x03FF0000) >> 16) != 0x42)) - result |= 0x01; - else - return result; - - if (!(reg_eac & BIT(30)) && - (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) && - (((reg_ecc & 0x03FF0000) >> 16) != 0x36)) - result |= 0x02; - return result; -} - -static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw, - bool b_iqk_ok, long result[][8], - u8 final_candidate, bool btxonly) -{ - u32 oldval_0, x, tx0_a, reg; - long y, tx0_c; - - if (final_candidate == 0xFF) - return; - else if (b_iqk_ok) { - oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, - MASKDWORD) >> 22) & 0x3FF; - x = result[final_candidate][0]; - if ((x & 0x00000200) != 0) - x = x | 0xFFFFFC00; - tx0_a = (x * oldval_0) >> 8; - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31), - ((x * oldval_0 >> 7) & 0x1)); - y = result[final_candidate][1]; - if ((y & 0x00000200) != 0) - y = y | 0xFFFFFC00; - tx0_c = (y * oldval_0) >> 8; - rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, - ((tx0_c & 0x3C0) >> 6)); - rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, - (tx0_c & 0x3F)); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29), - ((y * oldval_0 >> 7) & 0x1)); - if (btxonly) - return; - reg = result[final_candidate][2]; - rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg); - reg = result[final_candidate][3] & 0x3F; - rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg); - reg = (result[final_candidate][3] >> 6) & 0xF; - rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg); - } -} - -static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw, - bool b_iqk_ok, long result[][8], - u8 final_candidate, bool btxonly) -{ - u32 oldval_1, x, tx1_a, reg; - long y, tx1_c; - - if (final_candidate == 0xFF) - return; - else if (b_iqk_ok) { - oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, - MASKDWORD) >> 22) & 0x3FF; - x = result[final_candidate][4]; - if ((x & 0x00000200) != 0) - x = x | 0xFFFFFC00; - tx1_a = (x * oldval_1) >> 8; - 
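/*
 * Editor's note (not part of the patch): the IQK matrix fill above treats
 * each calibration result as a 10-bit signed value (bit 9 is the sign) and
 * scales it against the previously programmed setting before the low bits
 * are written back through the register bitmask.  A minimal sketch of that
 * sign-extension and scaling step, using hypothetical helper names:
 */
#include <stdint.h>

static int32_t example_sign_extend_10bit(uint32_t raw)
{
	if (raw & 0x00000200)		/* bit 9 set: negative 10-bit value */
		raw |= 0xFFFFFC00;
	return (int32_t)raw;
}

static uint32_t example_scale_tx_iqk(uint32_t raw, uint32_t oldval)
{
	/* mirrors "tx0_a = (x * oldval_0) >> 8" from the removed code */
	return (uint32_t)((example_sign_extend_10bit(raw) * (int64_t)oldval) >> 8);
}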
rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27), - ((x * oldval_1 >> 7) & 0x1)); - y = result[final_candidate][5]; - if ((y & 0x00000200) != 0) - y = y | 0xFFFFFC00; - tx1_c = (y * oldval_1) >> 8; - rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, - ((tx1_c & 0x3C0) >> 6)); - rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000, - (tx1_c & 0x3F)); - rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25), - ((y * oldval_1 >> 7) & 0x1)); - if (btxonly) - return; - reg = result[final_candidate][6]; - rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg); - reg = result[final_candidate][7] & 0x3F; - rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg); - reg = (result[final_candidate][7] >> 6) & 0xF; - rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg); - } -} - -static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw, - u32 *addareg, u32 *addabackup, - u32 registernum) -{ - u32 i; - - for (i = 0; i < registernum; i++) - addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD); -} - -static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw, - u32 *macreg, u32 *macbackup) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - - for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) - macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]); - macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]); -} - -static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw, - u32 *addareg, u32 *addabackup, - u32 regiesternum) -{ - u32 i; - - for (i = 0; i < regiesternum; i++) - rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]); -} - -static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw, - u32 *macreg, u32 *macbackup) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - - for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) - rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]); - rtl_write_dword(rtlpriv, macreg[i], macbackup[i]); -} - -static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw, - u32 *addareg, bool is_patha_on, bool is2t) -{ - u32 pathOn; - u32 i; - - pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4; - if (false == is2t) { - pathOn = 0x0bdb25a0; - rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0); - } else { - rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn); - } - - for (i = 1; i < IQK_ADDA_REG_NUM; i++) - rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn); -} - -static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw, - u32 *macreg, u32 *macbackup) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - - rtl_write_byte(rtlpriv, macreg[0], 0x3F); - - for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) - rtl_write_byte(rtlpriv, macreg[i], - (u8) (macbackup[i] & (~BIT(3)))); - rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5)))); -} - -static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw) -{ - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0); - rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); -} - -static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode) -{ - u32 mode; - - mode = pi_mode ? 
0x01000100 : 0x01000000; - rtl_set_bbreg(hw, 0x820, MASKDWORD, mode); - rtl_set_bbreg(hw, 0x828, MASKDWORD, mode); -} - -static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw, - long result[][8], u8 c1, u8 c2) -{ - u32 i, j, diff, simularity_bitmap, bound; - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - u8 final_candidate[2] = { 0xFF, 0xFF }; - bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version); - - if (is2t) - bound = 8; - else - bound = 4; - - simularity_bitmap = 0; - - for (i = 0; i < bound; i++) { - diff = (result[c1][i] > result[c2][i]) ? - (result[c1][i] - result[c2][i]) : - (result[c2][i] - result[c1][i]); - - if (diff > MAX_TOLERANCE) { - if ((i == 2 || i == 6) && !simularity_bitmap) { - if (result[c1][i] + result[c1][i + 1] == 0) - final_candidate[(i / 4)] = c2; - else if (result[c2][i] + result[c2][i + 1] == 0) - final_candidate[(i / 4)] = c1; - else - simularity_bitmap = simularity_bitmap | - (1 << i); - } else - simularity_bitmap = - simularity_bitmap | (1 << i); - } - } - - if (simularity_bitmap == 0) { - for (i = 0; i < (bound / 4); i++) { - if (final_candidate[i] != 0xFF) { - for (j = i * 4; j < (i + 1) * 4 - 2; j++) - result[3][j] = - result[final_candidate[i]][j]; - bresult = false; - } - } - return bresult; - } else if (!(simularity_bitmap & 0x0F)) { - for (i = 0; i < 4; i++) - result[3][i] = result[c1][i]; - return false; - } else if (!(simularity_bitmap & 0xF0) && is2t) { - for (i = 4; i < 8; i++) - result[3][i] = result[c1][i]; - return false; - } else { - return false; - } - -} - -static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, - long result[][8], u8 t, bool is2t) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - u32 i; - u8 patha_ok, pathb_ok; - u32 adda_reg[IQK_ADDA_REG_NUM] = { - 0x85c, 0xe6c, 0xe70, 0xe74, - 0xe78, 0xe7c, 0xe80, 0xe84, - 0xe88, 0xe8c, 0xed0, 0xed4, - 0xed8, 0xedc, 0xee0, 0xeec - }; - - u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { - 0x522, 0x550, 0x551, 0x040 - }; - - const u32 retrycount = 2; - - u32 bbvalue; - - if (t == 0) { - bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD); - - _rtl92c_phy_save_adda_registers(hw, adda_reg, - rtlphy->adda_backup, 16); - _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg, - rtlphy->iqk_mac_backup); - } - _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t); - if (t == 0) { - rtlphy->b_rfpi_enable = (u8) rtl_get_bbreg(hw, - RFPGA0_XA_HSSIPARAMETER1, - BIT(8)); - } - if (!rtlphy->b_rfpi_enable) - _rtl92c_phy_pi_mode_switch(hw, true); - if (t == 0) { - rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD); - rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD); - rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD); - } - rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600); - rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4); - rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000); - if (is2t) { - rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000); - rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000); - } - _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg, - rtlphy->iqk_mac_backup); - rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000); - if (is2t) - rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000); - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000); - rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00); - rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800); - for (i = 0; i < retrycount; i++) { - patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t); - if (patha_ok == 0x03) { - result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) & - 0x3FF0000) >> 16; - result[t][1] = (rtl_get_bbreg(hw, 
0xe9c, MASKDWORD) & - 0x3FF0000) >> 16; - result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) & - 0x3FF0000) >> 16; - result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) & - 0x3FF0000) >> 16; - break; - } else if (i == (retrycount - 1) && patha_ok == 0x01) - result[t][0] = (rtl_get_bbreg(hw, 0xe94, - MASKDWORD) & 0x3FF0000) >> - 16; - result[t][1] = - (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16; - - } - - if (is2t) { - _rtl92c_phy_path_a_standby(hw); - _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t); - for (i = 0; i < retrycount; i++) { - pathb_ok = _rtl92c_phy_path_b_iqk(hw); - if (pathb_ok == 0x03) { - result[t][4] = (rtl_get_bbreg(hw, - 0xeb4, - MASKDWORD) & - 0x3FF0000) >> 16; - result[t][5] = - (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & - 0x3FF0000) >> 16; - result[t][6] = - (rtl_get_bbreg(hw, 0xec4, MASKDWORD) & - 0x3FF0000) >> 16; - result[t][7] = - (rtl_get_bbreg(hw, 0xecc, MASKDWORD) & - 0x3FF0000) >> 16; - break; - } else if (i == (retrycount - 1) && pathb_ok == 0x01) { - result[t][4] = (rtl_get_bbreg(hw, - 0xeb4, - MASKDWORD) & - 0x3FF0000) >> 16; - } - result[t][5] = (rtl_get_bbreg(hw, 0xebc, MASKDWORD) & - 0x3FF0000) >> 16; - } - } - rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04); - rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874); - rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08); - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0); - rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3); - if (is2t) - rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3); - if (t != 0) { - if (!rtlphy->b_rfpi_enable) - _rtl92c_phy_pi_mode_switch(hw, false); - _rtl92c_phy_reload_adda_registers(hw, adda_reg, - rtlphy->adda_backup, 16); - _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg, - rtlphy->iqk_mac_backup); - } -} - -static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) +void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) { u8 tmpreg; u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; @@ -1866,666 +477,6 @@ static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) } } -static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, - char delta, bool is2t) -{ - /* This routine is deliberately dummied out for later fixes */ -#if 0 - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - - u32 reg_d[PATH_NUM]; - u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound; - - u32 bb_backup[APK_BB_REG_NUM]; - u32 bb_reg[APK_BB_REG_NUM] = { - 0x904, 0xc04, 0x800, 0xc08, 0x874 - }; - u32 bb_ap_mode[APK_BB_REG_NUM] = { - 0x00000020, 0x00a05430, 0x02040000, - 0x000800e4, 0x00204000 - }; - u32 bb_normal_ap_mode[APK_BB_REG_NUM] = { - 0x00000020, 0x00a05430, 0x02040000, - 0x000800e4, 0x22204000 - }; - - u32 afe_backup[APK_AFE_REG_NUM]; - u32 afe_reg[APK_AFE_REG_NUM] = { - 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, - 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c, - 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, - 0xeec - }; - - u32 mac_backup[IQK_MAC_REG_NUM]; - u32 mac_reg[IQK_MAC_REG_NUM] = { - 0x522, 0x550, 0x551, 0x040 - }; - - u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = { - {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c}, - {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e} - }; - - u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = { - {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c}, - {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c} - }; - - u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = { - {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d}, - {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050} - 
}; - - u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = { - {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}, - {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a} - }; - - u32 afe_on_off[PATH_NUM] = { - 0x04db25a4, 0x0b1b25a4 - }; - - u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c }; - - u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 }; - - u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 }; - - u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 }; - - const char apk_delta_mapping[APK_BB_REG_NUM][13] = { - {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, - {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, - {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6}, - {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6}, - {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0} - }; - - const u32 apk_normal_setting_value_1[13] = { - 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28, - 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3, - 0x12680000, 0x00880000, 0x00880000 - }; - - const u32 apk_normal_setting_value_2[16] = { - 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3, - 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025, - 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008, - 0x00050006 - }; - - const u32 apk_result[PATH_NUM][APK_BB_REG_NUM]; - - long bb_offset, delta_v, delta_offset; - - if (!is2t) - pathbound = 1; - - for (index = 0; index < PATH_NUM; index++) { - apk_offset[index] = apk_normal_offset[index]; - apk_value[index] = apk_normal_value[index]; - afe_on_off[index] = 0x6fdb25a4; - } - - for (index = 0; index < APK_BB_REG_NUM; index++) { - for (path = 0; path < pathbound; path++) { - apk_rf_init_value[path][index] = - apk_normal_rf_init_value[path][index]; - apk_rf_value_0[path][index] = - apk_normal_rf_value_0[path][index]; - } - bb_ap_mode[index] = bb_normal_ap_mode[index]; - - apkbound = 6; - } - - for (index = 0; index < APK_BB_REG_NUM; index++) { - if (index == 0) - continue; - bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD); - } - - _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup); - - _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16); - - for (path = 0; path < pathbound; path++) { - if (path == RF90_PATH_A) { - offset = 0xb00; - for (index = 0; index < 11; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_1 - [index]); - - offset += 0x04; - } - - rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000); - - offset = 0xb68; - for (; index < 13; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_1 - [index]); - - offset += 0x04; - } - - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000); - - offset = 0xb00; - for (index = 0; index < 16; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_2 - [index]); - - offset += 0x04; - } - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); - } else if (path == RF90_PATH_B) { - offset = 0xb70; - for (index = 0; index < 10; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_1 - [index]); - - offset += 0x04; - } - rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000); - rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000); - - offset = 0xb68; - index = 11; - for (; index < 13; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_1 - [index]); - - offset += 0x04; - } - - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000); - - offset = 0xb60; - for (index = 0; index < 16; index++) { - rtl_set_bbreg(hw, offset, MASKDWORD, - apk_normal_setting_value_2 - [index]); - - offset += 0x04; - } - 
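/*
 * Editor's sketch (not part of the patch): the IQ-calibration flow in this
 * file runs the measurement several times and only accepts a result set when
 * two runs agree within a per-field tolerance.  A simplified standalone form
 * of that comparison; EXAMPLE_MAX_TOLERANCE is a hypothetical stand-in for
 * the driver's MAX_TOLERANCE constant.
 */
#include <stdbool.h>

#define EXAMPLE_MAX_TOLERANCE	0x20	/* assumption: stand-in tolerance */

static bool example_iqk_results_agree(const long a[8], const long b[8], int nfields)
{
	int i;

	for (i = 0; i < nfields; i++) {
		long diff = (a[i] > b[i]) ? (a[i] - b[i]) : (b[i] - a[i]);

		if (diff > EXAMPLE_MAX_TOLERANCE)
			return false;	/* this pair of runs disagrees */
	}
	return true;
}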
rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); - } - - reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path, - 0xd, MASKDWORD); - - for (index = 0; index < APK_AFE_REG_NUM; index++) - rtl_set_bbreg(hw, afe_reg[index], MASKDWORD, - afe_on_off[path]); - - if (path == RF90_PATH_A) { - for (index = 0; index < APK_BB_REG_NUM; index++) { - if (index == 0) - continue; - rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, - bb_ap_mode[index]); - } - } - - _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup); - - if (path == 0) { - rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000); - } else { - rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD, - 0x10000); - rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD, - 0x1000f); - rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD, - 0x20103); - } - - delta_offset = ((delta + 14) / 2); - if (delta_offset < 0) - delta_offset = 0; - else if (delta_offset > 12) - delta_offset = 12; - - for (index = 0; index < APK_BB_REG_NUM; index++) { - if (index != 1) - continue; - - tmpreg = apk_rf_init_value[path][index]; - - if (!rtlefuse->b_apk_thermalmeterignore) { - bb_offset = (tmpreg & 0xF0000) >> 16; - - if (!(tmpreg & BIT(15))) - bb_offset = -bb_offset; - - delta_v = - apk_delta_mapping[index][delta_offset]; - - bb_offset += delta_v; - - if (bb_offset < 0) { - tmpreg = tmpreg & (~BIT(15)); - bb_offset = -bb_offset; - } else { - tmpreg = tmpreg | BIT(15); - } - - tmpreg = - (tmpreg & 0xFFF0FFFF) | (bb_offset << 16); - } - - rtl_set_rfreg(hw, (enum radio_path)path, 0xc, - MASKDWORD, 0x8992e); - rtl_set_rfreg(hw, (enum radio_path)path, 0x0, - MASKDWORD, apk_rf_value_0[path][index]); - rtl_set_rfreg(hw, (enum radio_path)path, 0xd, - MASKDWORD, tmpreg); - - i = 0; - do { - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000); - rtl_set_bbreg(hw, apk_offset[path], - MASKDWORD, apk_value[0]); - RTPRINT(rtlpriv, FINIT, INIT_IQK, - ("PHY_APCalibrate() offset 0x%x " - "value 0x%x\n", - apk_offset[path], - rtl_get_bbreg(hw, apk_offset[path], - MASKDWORD))); - - mdelay(3); - - rtl_set_bbreg(hw, apk_offset[path], - MASKDWORD, apk_value[1]); - RTPRINT(rtlpriv, FINIT, INIT_IQK, - ("PHY_APCalibrate() offset 0x%x " - "value 0x%x\n", - apk_offset[path], - rtl_get_bbreg(hw, apk_offset[path], - MASKDWORD))); - - mdelay(20); - - rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000); - - if (path == RF90_PATH_A) - tmpreg = rtl_get_bbreg(hw, 0xbd8, - 0x03E00000); - else - tmpreg = rtl_get_bbreg(hw, 0xbd8, - 0xF8000000); - - RTPRINT(rtlpriv, FINIT, INIT_IQK, - ("PHY_APCalibrate() offset " - "0xbd8[25:21] %x\n", tmpreg)); - - i++; - - } while (tmpreg > apkbound && i < 4); - - apk_result[path][index] = tmpreg; - } - } - - _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup); - - for (index = 0; index < APK_BB_REG_NUM; index++) { - if (index == 0) - continue; - rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]); - } - - _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16); - - for (path = 0; path < pathbound; path++) { - rtl_set_rfreg(hw, (enum radio_path)path, 0xd, - MASKDWORD, reg_d[path]); - - if (path == RF90_PATH_B) { - rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD, - 0x1000f); - rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD, - 0x20101); - } - - if (apk_result[path][1] > 6) - apk_result[path][1] = 6; - } - - for (path = 0; path < pathbound; path++) { - rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD, - ((apk_result[path][1] << 15) | - (apk_result[path][1] << 10) | - (apk_result[path][1] << 5) | - apk_result[path][1])); - - if (path == RF90_PATH_A) - 
rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD, - ((apk_result[path][1] << 15) | - (apk_result[path][1] << 10) | - (0x00 << 5) | 0x05)); - else - rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD, - ((apk_result[path][1] << 15) | - (apk_result[path][1] << 10) | - (0x02 << 5) | 0x05)); - - rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD, - ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) | - 0x08)); - - } - - rtlphy->b_apk_done = true; -#endif -} - -static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, - bool bmain, bool is2t) -{ - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - if (is_hal_stop(rtlhal)) { - rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01); - rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); - } - if (is2t) { - if (bmain) - rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, - BIT(5) | BIT(6), 0x1); - else - rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, - BIT(5) | BIT(6), 0x2); - } else { - if (bmain) - rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2); - else - rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1); - - } -} - -#undef IQK_ADDA_REG_NUM -#undef IQK_DELAY_TIME - -void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - long result[4][8]; - u8 i, final_candidate; - bool b_patha_ok, b_pathb_ok; - long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, - reg_ecc, reg_tmp = 0; - bool is12simular, is13simular, is23simular; - bool b_start_conttx = false, b_singletone = false; - u32 iqk_bb_reg[10] = { - ROFDM0_XARXIQIMBALANCE, - ROFDM0_XBRXIQIMBALANCE, - ROFDM0_ECCATHRESHOLD, - ROFDM0_AGCRSSITABLE, - ROFDM0_XATXIQIMBALANCE, - ROFDM0_XBTXIQIMBALANCE, - ROFDM0_XCTXIQIMBALANCE, - ROFDM0_XCTXAFE, - ROFDM0_XDTXAFE, - ROFDM0_RXIQEXTANTA - }; - - if (b_recovery) { - _rtl92c_phy_reload_adda_registers(hw, - iqk_bb_reg, - rtlphy->iqk_bb_backup, 10); - return; - } - if (b_start_conttx || b_singletone) - return; - for (i = 0; i < 8; i++) { - result[0][i] = 0; - result[1][i] = 0; - result[2][i] = 0; - result[3][i] = 0; - } - final_candidate = 0xff; - b_patha_ok = false; - b_pathb_ok = false; - is12simular = false; - is23simular = false; - is13simular = false; - for (i = 0; i < 3; i++) { - if (IS_92C_SERIAL(rtlhal->version)) - _rtl92c_phy_iq_calibrate(hw, result, i, true); - else - _rtl92c_phy_iq_calibrate(hw, result, i, false); - if (i == 1) { - is12simular = _rtl92c_phy_simularity_compare(hw, - result, 0, - 1); - if (is12simular) { - final_candidate = 0; - break; - } - } - if (i == 2) { - is13simular = _rtl92c_phy_simularity_compare(hw, - result, 0, - 2); - if (is13simular) { - final_candidate = 0; - break; - } - is23simular = _rtl92c_phy_simularity_compare(hw, - result, 1, - 2); - if (is23simular) - final_candidate = 1; - else { - for (i = 0; i < 8; i++) - reg_tmp += result[3][i]; - - if (reg_tmp != 0) - final_candidate = 3; - else - final_candidate = 0xFF; - } - } - } - for (i = 0; i < 4; i++) { - reg_e94 = result[i][0]; - reg_e9c = result[i][1]; - reg_ea4 = result[i][2]; - reg_eac = result[i][3]; - reg_eb4 = result[i][4]; - reg_ebc = result[i][5]; - reg_ec4 = result[i][6]; - reg_ecc = result[i][7]; - } - if (final_candidate != 0xff) { - rtlphy->reg_e94 = reg_e94 = result[final_candidate][0]; - rtlphy->reg_e9c = reg_e9c = result[final_candidate][1]; - reg_ea4 = result[final_candidate][2]; - reg_eac = result[final_candidate][3]; - rtlphy->reg_eb4 = reg_eb4 = 
result[final_candidate][4]; - rtlphy->reg_ebc = reg_ebc = result[final_candidate][5]; - reg_ec4 = result[final_candidate][6]; - reg_ecc = result[final_candidate][7]; - b_patha_ok = b_pathb_ok = true; - } else { - rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100; - rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0; - } - if (reg_e94 != 0) /*&&(reg_ea4 != 0) */ - _rtl92c_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result, - final_candidate, - (reg_ea4 == 0)); - if (IS_92C_SERIAL(rtlhal->version)) { - if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */ - _rtl92c_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok, - result, - final_candidate, - (reg_ec4 == 0)); - } - _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg, - rtlphy->iqk_bb_backup, 10); -} - -void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw) -{ - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - bool b_start_conttx = false, b_singletone = false; - - if (b_start_conttx || b_singletone) - return; - if (IS_92C_SERIAL(rtlhal->version)) - _rtl92c_phy_lc_calibrate(hw, true); - else - _rtl92c_phy_lc_calibrate(hw, false); -} - -void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - if (rtlphy->b_apk_done) - return; - if (IS_92C_SERIAL(rtlhal->version)) - _rtl92c_phy_ap_calibrate(hw, delta, true); - else - _rtl92c_phy_ap_calibrate(hw, delta, false); -} - -void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain) -{ - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - - if (IS_92C_SERIAL(rtlhal->version)) - _rtl92c_phy_set_rfpath_switch(hw, bmain, true); - else - _rtl92c_phy_set_rfpath_switch(hw, bmain, false); -} - -bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - bool b_postprocessing = false; - - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - ("-->IO Cmd(%#x), set_io_inprogress(%d)\n", - iotype, rtlphy->set_io_inprogress)); - do { - switch (iotype) { - case IO_CMD_RESUME_DM_BY_SCAN: - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - ("[IO CMD] Resume DM after scan.\n")); - b_postprocessing = true; - break; - case IO_CMD_PAUSE_DM_BY_SCAN: - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - ("[IO CMD] Pause DM before scan.\n")); - b_postprocessing = true; - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - ("switch case not process\n")); - break; - } - } while (false); - if (b_postprocessing && !rtlphy->set_io_inprogress) { - rtlphy->set_io_inprogress = true; - rtlphy->current_io_type = iotype; - } else { - return false; - } - rtl92c_phy_set_io(hw); - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype)); - return true; -} - -void rtl92c_phy_set_io(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_phy *rtlphy = &(rtlpriv->phy); - - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - ("--->Cmd(%#x), set_io_inprogress(%d)\n", - rtlphy->current_io_type, rtlphy->set_io_inprogress)); - switch (rtlphy->current_io_type) { - case IO_CMD_RESUME_DM_BY_SCAN: - dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1; - rtl92c_dm_write_dig(hw); - rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); - break; - case IO_CMD_PAUSE_DM_BY_SCAN: - rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue; - dm_digtable.cur_igvalue = 0x17; - rtl92c_dm_write_dig(hw); - break; - default: - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - ("switch case not process\n")); - break; - } - 
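/*
 * Editor's sketch (not part of the patch): the IO-command handler above
 * pauses dynamic initial-gain control around a scan by saving the current
 * gain value, forcing a fixed value (0x17) while scanning, and restoring
 * the saved value afterwards.  A minimal standalone illustration of that
 * save/restore pattern, with hypothetical types and names:
 */
#include <stdint.h>

struct example_dig_state {
	uint8_t cur_igvalue;	/* gain value currently programmed */
	uint8_t backup_igvalue;	/* value saved before the scan */
};

static void example_pause_dig_for_scan(struct example_dig_state *dig)
{
	dig->backup_igvalue = dig->cur_igvalue;
	dig->cur_igvalue = 0x17;	/* fixed gain used while scanning */
}

static void example_resume_dig_after_scan(struct example_dig_state *dig)
{
	dig->cur_igvalue = dig->backup_igvalue;
}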
rtlphy->set_io_inprogress = false; - RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, - ("<---(%#x)\n", rtlphy->current_io_type)); -} - -void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); - rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); - rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); -} - -static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw) -{ - u32 u4b_tmp; - u8 delay = 5; - struct rtl_priv *rtlpriv = rtl_priv(hw); - - rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); - rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); - rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); - u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK); - while (u4b_tmp != 0 && delay > 0) { - rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0); - rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); - rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); - u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK); - delay--; - } - if (delay == 0) { - rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); - rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); - RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, - ("Switch RF timeout !!!.\n")); - return; - } - rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); - rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); -} - static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, enum rf_pwrstate rfpwr_state) { @@ -2648,7 +599,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies))); ppsc->last_sleep_jiffies = jiffies; - _rtl92ce_phy_set_rf_sleep(hw); + _rtl92c_phy_set_rf_sleep(hw); break; } default: @@ -2663,7 +614,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, return bresult; } -bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, +bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, enum rf_pwrstate rfpwr_state) { struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h index ca4daee6e9a8..a37267e3fc22 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h @@ -57,8 +57,6 @@ #define IQK_MAC_REG_NUM 4 #define RF90_PATH_MAX 2 -#define CHANNEL_MAX_NUMBER 14 -#define CHANNEL_GROUP_MAX 3 #define CT_OFFSET_MAC_ADDR 0X16 @@ -78,9 +76,7 @@ #define CT_OFFSET_CUSTOMER_ID 0x7F #define RTL92C_MAX_PATH_NUM 2 -#define CHANNEL_MAX_NUMBER 14 -#define CHANNEL_GROUP_MAX 3 - +#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255 enum swchnlcmd_id { CMDID_END, CMDID_SET_TXPOWEROWER_LEVEL, @@ -195,11 +191,11 @@ extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask); -extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw, +extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask, u32 data); extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw); -extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw); +bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw); extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw); extern bool 
rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw, enum radio_path rfpath); @@ -227,11 +223,32 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath); bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); -extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, +bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw, enum rf_pwrstate rfpwr_state); -void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw); void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw); bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype); void rtl92c_phy_set_io(struct ieee80211_hw *hw); +void rtl92c_bb_block_on(struct ieee80211_hw *hw); +u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset); +u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset); +u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask); +void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data); +void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, + u32 data); +void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 offset, + u32 data); +void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, + u32 data); +bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw); +bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw); +void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h index 875d51465225..b0868a613841 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h @@ -63,7 +63,15 @@ #define REG_LEDCFG3 0x004F #define REG_FSIMR 0x0050 #define REG_FSISR 0x0054 - +#define REG_HSIMR 0x0058 +#define REG_HSISR 0x005c + +/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Pin Control. */ +#define REG_GPIO_PIN_CTRL_2 0x0060 +/* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */ +#define REG_GPIO_IO_SEL_2 0x0062 +/* RTL8723 WIFI/BT/GPS Multi-Function control source. 
*/ +#define REG_MULTI_FUNC_CTRL 0x0068 #define REG_MCUFWDL 0x0080 #define REG_HMEBOX_EXT_0 0x0088 @@ -79,6 +87,7 @@ #define REG_PCIE_MIO_INTD 0x00E8 #define REG_HPON_FSM 0x00EC #define REG_SYS_CFG 0x00F0 +#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only.*/ #define REG_CR 0x0100 #define REG_PBP 0x0104 @@ -209,6 +218,8 @@ #define REG_RDG_PIFS 0x0513 #define REG_SIFS_CTX 0x0514 #define REG_SIFS_TRX 0x0516 +#define REG_SIFS_CCK 0x0514 +#define REG_SIFS_OFDM 0x0516 #define REG_AGGR_BREAK_TIME 0x051A #define REG_SLOT 0x051B #define REG_TX_PTCL_CTRL 0x0520 @@ -261,6 +272,10 @@ #define REG_MAC_SPEC_SIFS 0x063A #define REG_RESP_SIFS_CCK 0x063C #define REG_RESP_SIFS_OFDM 0x063E +/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */ +#define REG_R2T_SIFS 0x063C +/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */ +#define REG_T2T_SIFS 0x063E #define REG_ACKTO 0x0640 #define REG_CTS2TO 0x0641 #define REG_EIFS 0x0642 @@ -641,9 +656,10 @@ #define STOPBE BIT(1) #define STOPBK BIT(0) -#define RCR_APPFCS BIT(31) +#define RCR_APP_FCS BIT(31) #define RCR_APP_MIC BIT(30) #define RCR_APP_ICV BIT(29) +#define RCR_APP_PHYSTS BIT(28) #define RCR_APP_PHYST_RXFF BIT(28) #define RCR_APP_BA_SSN BIT(27) #define RCR_ENMBID BIT(24) @@ -759,6 +775,7 @@ #define BOOT_FROM_EEPROM BIT(4) #define EEPROM_EN BIT(5) +#define EEPROMSEL BOOT_FROM_EEPROM #define AFE_BGEN BIT(0) #define AFE_MBEN BIT(1) @@ -876,6 +893,8 @@ #define BD_MAC2 BIT(9) #define BD_MAC1 BIT(10) #define IC_MACPHY_MODE BIT(11) +#define BT_FUNC BIT(16) +#define VENDOR_ID BIT(19) #define PAD_HWPD_IDN BIT(22) #define TRP_VAUX_EN BIT(23) #define TRP_BT_EN BIT(24) @@ -883,6 +902,28 @@ #define BD_HCI_SEL BIT(26) #define TYPE_ID BIT(27) +/* REG_GPIO_OUTSTS (For RTL8723 only) */ +#define EFS_HCI_SEL (BIT(0)|BIT(1)) +#define PAD_HCI_SEL (BIT(2)|BIT(3)) +#define HCI_SEL (BIT(4)|BIT(5)) +#define PKG_SEL_HCI BIT(6) +#define FEN_GPS BIT(7) +#define FEN_BT BIT(8) +#define FEN_WL BIT(9) +#define FEN_PCI BIT(10) +#define FEN_USB BIT(11) +#define BTRF_HWPDN_N BIT(12) +#define WLRF_HWPDN_N BIT(13) +#define PDN_BT_N BIT(14) +#define PDN_GPS_N BIT(15) +#define BT_CTL_HWPDN BIT(16) +#define GPS_CTL_HWPDN BIT(17) +#define PPHY_SUSB BIT(20) +#define UPHY_SUSB BIT(21) +#define PCI_SUSEN BIT(22) +#define USB_SUSEN BIT(23) +#define RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28)) + #define CHIP_VER_RTL_MASK 0xF000 #define CHIP_VER_RTL_SHIFT 12 @@ -1035,7 +1076,7 @@ #define _RARF_RC7(x) (((x) & 0x1F) << 16) #define _RARF_RC8(x) (((x) & 0x1F) << 24) -#define AC_PARAM_TXOP_LIMIT_OFFSET 16 +#define AC_PARAM_TXOP_OFFSET 16 #define AC_PARAM_ECW_MAX_OFFSET 12 #define AC_PARAM_ECW_MIN_OFFSET 8 #define AC_PARAM_AIFS_OFFSET 0 @@ -1184,6 +1225,30 @@ #define HAL_8192C_HW_GPIO_WPS_BIT BIT(2) +/* REG_MULTI_FUNC_CTRL(For RTL8723 Only) */ +/* Enable GPIO[9] as WiFi HW PDn source */ +#define WL_HWPDN_EN BIT(0) +/* WiFi HW PDn polarity control */ +#define WL_HWPDN_SL BIT(1) +/* WiFi function enable */ +#define WL_FUNC_EN BIT(2) +/* Enable GPIO[9] as WiFi RF HW PDn source */ +#define WL_HWROF_EN BIT(3) +/* Enable GPIO[11] as BT HW PDn source */ +#define BT_HWPDN_EN BIT(16) +/* BT HW PDn polarity control */ +#define BT_HWPDN_SL BIT(17) +/* BT function enable */ +#define BT_FUNC_EN BIT(18) +/* Enable GPIO[11] as BT/GPS RF HW PDn source */ +#define BT_HWROF_EN BIT(19) +/* Enable GPIO[10] as GPS HW PDn source */ +#define GPS_HWPDN_EN BIT(20) +/* GPS HW PDn polarity control */ +#define GPS_HWPDN_SL BIT(21) +/* GPS function enable */ +#define GPS_FUNC_EN BIT(22) + #define RPMAC_RESET 0x100 #define RPMAC_TXSTART 0x104 #define 
RPMAC_TXLEGACYSIG 0x108 @@ -1496,7 +1561,7 @@ #define BTXHTSTBC 0x30 #define BTXHTADVANCECODING 0x40 #define BTXHTSHORTGI 0x80 -#define BTXHTNUMBERHT_LT F 0x300 +#define BTXHTNUMBERHT_LTF 0x300 #define BTXHTCRC8 0x3fc00 #define BCOUNTERRESET 0x10000 #define BNUMOFOFDMTX 0xffff diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c index ffd8e04c4028..669b1168dbec 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c @@ -61,7 +61,7 @@ void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) } } -void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, +void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -410,7 +410,7 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, } } -void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, +void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel) { u32 writeVal[2], powerBase0[2], powerBase1[2]; @@ -430,7 +430,7 @@ void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, } } -bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw) +bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); @@ -484,11 +484,11 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw) switch (rfpath) { case RF90_PATH_A: - rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, + rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw, (enum radio_path) rfpath); break; case RF90_PATH_B: - rtstatus = rtl92c_phy_config_rf_with_headerfile(hw, + rtstatus = rtl92ce_phy_config_rf_with_headerfile(hw, (enum radio_path) rfpath); break; case RF90_PATH_C: diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h index d3014f99bb7b..3aa520c1c171 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h @@ -40,5 +40,8 @@ extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel); extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel); -extern bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw); +bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw); +bool rtl92ce_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); + #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index b366e8862929..b1cc4d44f534 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -37,6 +37,7 @@ #include "phy.h" #include "dm.h" #include "hw.h" +#include "rf.h" #include "sw.h" #include "trx.h" #include "led.h" @@ -46,13 +47,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); - rtlpriv->dm.b_dm_initialgain_enable = 1; + rtlpriv->dm.dm_initialgain_enable = 1; rtlpriv->dm.dm_flag = 0; - rtlpriv->dm.b_disable_framebursting = 0;; + rtlpriv->dm.disable_framebursting = 0; rtlpriv->dm.thermalvalue = 0; rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13); - rtlpci->receive_config = (RCR_APPFCS | + rtlpci->receive_config = (RCR_APP_FCS | RCR_AMF | RCR_ADF | RCR_APP_MIC | @@ -122,7 +123,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = { .switch_channel = 
rtl92c_phy_sw_chnl, .dm_watchdog = rtl92c_dm_watchdog, .scan_operation_backup = rtl92c_phy_scan_operation_backup, - .set_rf_power_state = rtl92c_phy_set_rf_power_state, + .set_rf_power_state = rtl92ce_phy_set_rf_power_state, .led_control = rtl92ce_led_control, .set_desc = rtl92ce_set_desc, .get_desc = rtl92ce_get_desc, @@ -133,8 +134,17 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = { .deinit_sw_leds = rtl92ce_deinit_sw_leds, .get_bbreg = rtl92c_phy_query_bb_reg, .set_bbreg = rtl92c_phy_set_bb_reg, - .get_rfreg = rtl92c_phy_query_rf_reg, - .set_rfreg = rtl92c_phy_set_rf_reg, + .get_rfreg = rtl92ce_phy_query_rf_reg, + .set_rfreg = rtl92ce_phy_set_rf_reg, + .cmd_send_packet = _rtl92c_cmd_send_packet, + .phy_rf6052_config = rtl92ce_phy_rf6052_config, + .phy_rf6052_set_cck_txpower = rtl92ce_phy_rf6052_set_cck_txpower, + .phy_rf6052_set_ofdm_txpower = rtl92ce_phy_rf6052_set_ofdm_txpower, + .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile, + .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile, + .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate, + .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback, + .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower, }; static struct rtl_mod_params rtl92ce_mod_params = { diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h index de1198c38d4e..36e657668c1e 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h @@ -33,5 +33,19 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw); void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw); void rtl92c_init_var_map(struct ieee80211_hw *hw); +bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, + struct sk_buff *skb); +void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel); +void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel); +bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype); +bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype); +void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t); +u32 rtl92ce_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, u32 bitmask); +void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c index bf5852f2d634..aa2b5815600f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c @@ -36,7 +36,7 @@ #include "trx.h" #include "led.h" -static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(u16 fc, +static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(__le16 fc, unsigned int skb_queue) { @@ -245,24 +245,24 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, struct rtl_stats *pstats, struct rx_desc_92c *pdesc, struct rx_fwinfo_92c *p_drvinfo, - bool bpacket_match_bssid, - bool bpacket_toself, - bool b_packet_beacon) + bool packet_match_bssid, + bool packet_toself, + bool packet_beacon) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct phy_sts_cck_8192s_t *cck_buf; s8 rx_pwr_all, rx_pwr[4]; - u8 rf_rx_num, evm, pwdb_all; + u8 evm, pwdb_all, rf_rx_num = 0; u8 i, max_spatial_stream; - u32 rssi, total_rssi; + u32 rssi, total_rssi = 0; bool is_cck_rate; is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); - pstats->b_packet_matchbssid = bpacket_match_bssid; - pstats->b_packet_toself = 
bpacket_toself; - pstats->b_is_cck = is_cck_rate; - pstats->b_packet_beacon = b_packet_beacon; - pstats->b_is_cck = is_cck_rate; + pstats->packet_matchbssid = packet_match_bssid; + pstats->packet_toself = packet_toself; + pstats->is_cck = is_cck_rate; + pstats->packet_beacon = packet_beacon; + pstats->is_cck = is_cck_rate; pstats->rx_mimo_signalquality[0] = -1; pstats->rx_mimo_signalquality[1] = -1; @@ -315,7 +315,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, pstats->rx_pwdb_all = pwdb_all; pstats->recvsignalpower = rx_pwr_all; - if (bpacket_match_bssid) { + if (packet_match_bssid) { u8 sq; if (pstats->rx_pwdb_all > 40) sq = 100; @@ -334,10 +334,10 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, pstats->rx_mimo_signalquality[1] = -1; } } else { - rtlpriv->dm.brfpath_rxenable[0] = - rtlpriv->dm.brfpath_rxenable[1] = true; + rtlpriv->dm.rfpath_rxenable[0] = + rtlpriv->dm.rfpath_rxenable[1] = true; for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) { - if (rtlpriv->dm.brfpath_rxenable[i]) + if (rtlpriv->dm.rfpath_rxenable[i]) rf_rx_num++; rx_pwr[i] = @@ -347,7 +347,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, rtlpriv->stats.rx_snr_db[i] = (long)(p_drvinfo->rxsnr[i] / 2); - if (bpacket_match_bssid) + if (packet_match_bssid) pstats->rx_mimo_signalstrength[i] = (u8) rssi; } @@ -366,7 +366,7 @@ static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw, for (i = 0; i < max_spatial_stream; i++) { evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]); - if (bpacket_match_bssid) { + if (packet_match_bssid) { if (i == 0) pstats->signalquality = (u8) (evm & 0xff); @@ -393,7 +393,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw, u8 rfpath; u32 last_rssi, tmpval; - if (pstats->b_packet_toself || pstats->b_packet_beacon) { + if (pstats->packet_toself || pstats->packet_beacon) { rtlpriv->stats.rssi_calculate_cnt++; if (rtlpriv->stats.ui_rssi.total_num++ >= @@ -421,7 +421,7 @@ static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw, pstats->rssi = rtlpriv->stats.signal_strength; } - if (!pstats->b_is_cck && pstats->b_packet_toself) { + if (!pstats->is_cck && pstats->packet_toself) { for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; rfpath++) { @@ -463,7 +463,7 @@ static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw, struct rtl_stats *pstats) { struct rtl_priv *rtlpriv = rtl_priv(hw); - int weighting; + int weighting = 0; if (rtlpriv->stats.recv_signal_power == 0) rtlpriv->stats.recv_signal_power = pstats->recvsignalpower; @@ -493,7 +493,7 @@ static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw, rtlpriv->dm.undecorated_smoothed_pwdb; } - if (pstats->b_packet_toself || pstats->b_packet_beacon) { + if (pstats->packet_toself || pstats->packet_beacon) { if (undecorated_smoothed_pwdb < 0) undecorated_smoothed_pwdb = pstats->rx_pwdb_all; @@ -525,7 +525,7 @@ static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw, u32 last_evm, n_spatialstream, tmpval; if (pstats->signalquality != 0) { - if (pstats->b_packet_toself || pstats->b_packet_beacon) { + if (pstats->packet_toself || pstats->packet_beacon) { if (rtlpriv->stats.ui_link_quality.total_num++ >= PHY_LINKQUALITY_SLID_WIN_MAX) { @@ -595,8 +595,8 @@ static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw, struct rtl_stats *pcurrent_stats) { - if (!pcurrent_stats->b_packet_matchbssid && - !pcurrent_stats->b_packet_beacon) + if (!pcurrent_stats->packet_matchbssid && + !pcurrent_stats->packet_beacon) return; 
_rtl92ce_process_ui_rssi(hw, pcurrent_stats); @@ -617,34 +617,36 @@ static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw, u8 *tmp_buf; u8 *praddr; u8 *psaddr; - u16 fc, type; - bool b_packet_matchbssid, b_packet_toself, b_packet_beacon; + __le16 fc; + u16 type, c_fc; + bool packet_matchbssid, packet_toself, packet_beacon; tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; hdr = (struct ieee80211_hdr *)tmp_buf; - fc = le16_to_cpu(hdr->frame_control); + fc = hdr->frame_control; + c_fc = le16_to_cpu(fc); type = WLAN_FC_GET_TYPE(fc); praddr = hdr->addr1; psaddr = hdr->addr2; - b_packet_matchbssid = + packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) && (!compare_ether_addr(mac->bssid, - (fc & IEEE80211_FCTL_TODS) ? - hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ? + (c_fc & IEEE80211_FCTL_TODS) ? + hdr->addr1 : (c_fc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 : hdr->addr3)) && - (!pstats->b_hwerror) && (!pstats->b_crc) && (!pstats->b_icv)); + (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); - b_packet_toself = b_packet_matchbssid && + packet_toself = packet_matchbssid && (!compare_ether_addr(praddr, rtlefuse->dev_addr)); if (ieee80211_is_beacon(fc)) - b_packet_beacon = true; + packet_beacon = true; _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, - b_packet_matchbssid, b_packet_toself, - b_packet_beacon); + packet_matchbssid, packet_toself, + packet_beacon); _rtl92ce_process_phyinfo(hw, tmp_buf, pstats); } @@ -662,14 +664,14 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * RX_DRV_INFO_SIZE_UNIT; stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); - stats->b_icv = (u16) GET_RX_DESC_ICV(pdesc); - stats->b_crc = (u16) GET_RX_DESC_CRC32(pdesc); - stats->b_hwerror = (stats->b_crc | stats->b_icv); + stats->icv = (u16) GET_RX_DESC_ICV(pdesc); + stats->crc = (u16) GET_RX_DESC_CRC32(pdesc); + stats->hwerror = (stats->crc | stats->icv); stats->decrypted = !GET_RX_DESC_SWDEC(pdesc); stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc); - stats->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); - stats->b_isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); - stats->b_isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) + stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); + stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); + stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) && (GET_RX_DESC_FAGGR(pdesc) == 1)); stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); @@ -689,7 +691,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw, if (GET_RX_DESC_RXHT(pdesc)) rx_status->flag |= RX_FLAG_HT; - rx_status->flag |= RX_FLAG_TSFT; + rx_status->flag |= RX_FLAG_MACTIME_MPDU; if (stats->decrypted) rx_status->flag |= RX_FLAG_DECRYPTED; @@ -727,27 +729,24 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); - bool b_defaultadapter = true; - - struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid); - + bool defaultadapter = true; + struct ieee80211_sta *sta; u8 *pdesc = (u8 *) pdesc_tx; struct rtl_tcb_desc tcb_desc; u8 *qc = ieee80211_get_qos_ctl(hdr); u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; u16 seq_number; - u16 fc = le16_to_cpu(hdr->frame_control); + __le16 fc = hdr->frame_control; u8 rate_flag = info->control.rates[0].flags; enum rtl_desc_qsel fw_qsel = - 
_rtl92ce_map_hwqueue_to_fwqueue(le16_to_cpu(hdr->frame_control), - queue_index); + _rtl92ce_map_hwqueue_to_fwqueue(fc, queue_index); - bool b_firstseg = ((hdr->seq_ctrl & - cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); + bool firstseg = ((hdr->seq_ctrl & + cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0); - bool b_lastseg = ((hdr->frame_control & - cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); + bool lastseg = ((hdr->frame_control & + cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0); dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, @@ -759,7 +758,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c)); - if (b_firstseg) { + if (firstseg) { SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate); @@ -774,25 +773,25 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, } SET_TX_DESC_SEQ(pdesc, seq_number); - SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.b_rts_enable && + SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.rts_enable && !tcb_desc. - b_cts_enable) ? 1 : 0)); + cts_enable) ? 1 : 0)); SET_TX_DESC_HW_RTS_ENABLE(pdesc, - ((tcb_desc.b_rts_enable - || tcb_desc.b_cts_enable) ? 1 : 0)); - SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.b_cts_enable) ? 1 : 0)); - SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.b_rts_stbc) ? 1 : 0)); + ((tcb_desc.rts_enable + || tcb_desc.cts_enable) ? 1 : 0)); + SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.cts_enable) ? 1 : 0)); + SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.rts_stbc) ? 1 : 0)); SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate); SET_TX_DESC_RTS_BW(pdesc, 0); SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc); SET_TX_DESC_RTS_SHORT(pdesc, ((tcb_desc.rts_rate <= DESC92C_RATE54M) ? - (tcb_desc.b_rts_use_shortpreamble ? 1 : 0) - : (tcb_desc.b_rts_use_shortgi ? 1 : 0))); + (tcb_desc.rts_use_shortpreamble ? 1 : 0) + : (tcb_desc.rts_use_shortgi ? 1 : 0))); if (mac->bw_40) { - if (tcb_desc.b_packet_bw) { + if (tcb_desc.packet_bw) { SET_TX_DESC_DATA_BW(pdesc, 1); SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3); } else { @@ -811,10 +810,13 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_LINIP(pdesc, 0); SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len); + rcu_read_lock(); + sta = ieee80211_find_sta(mac->vif, mac->bssid); if (sta) { u8 ampdu_density = sta->ht_cap.ampdu_density; SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); } + rcu_read_unlock(); if (info->control.hw_key) { struct ieee80211_key_conf *keyconf = @@ -854,14 +856,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, } } - SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0)); - SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0)); + SET_TX_DESC_FIRST_SEG(pdesc, (firstseg ? 1 : 0)); + SET_TX_DESC_LAST_SEG(pdesc, (lastseg ? 1 : 0)); SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len); SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping)); - if (rtlpriv->dm.b_useramask) { + if (rtlpriv->dm.useramask) { SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index); SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id); } else { @@ -869,16 +871,16 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index); } - if ((!ieee80211_is_data_qos(fc)) && ppsc->b_leisure_ps && - ppsc->b_fwctrl_lps) { + if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps && + ppsc->fwctrl_lps) { SET_TX_DESC_HWSEQ_EN(pdesc, 1); SET_TX_DESC_PKT_ID(pdesc, 8); - if (!b_defaultadapter) + if (!defaultadapter) SET_TX_DESC_QOS(pdesc, 1); } - SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1)); + SET_TX_DESC_MORE_FRAG(pdesc, (lastseg ? 
0 : 1)); if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || is_broadcast_ether_addr(ieee80211_get_DA(hdr))) { @@ -889,8 +891,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, } void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, - u8 *pdesc, bool b_firstseg, - bool b_lastseg, struct sk_buff *skb) + u8 *pdesc, bool firstseg, + bool lastseg, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -901,11 +903,11 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, PCI_DMA_TODEVICE); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); - u16 fc = le16_to_cpu(hdr->frame_control); + __le16 fc = hdr->frame_control; CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); - if (b_firstseg) + if (firstseg) SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN); SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); @@ -1029,3 +1031,36 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue) BIT(0) << (hw_queue)); } } + +bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring; + struct rtl_tx_desc *pdesc; + u8 own; + unsigned long flags; + struct sk_buff *pskb = NULL; + + ring = &rtlpci->tx_ring[BEACON_QUEUE]; + + spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags); + + pskb = __skb_dequeue(&ring->queue); + if (pskb) + kfree_skb(pskb); + + pdesc = &ring->desc[0]; + own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN); + + rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb); + + __skb_queue_tail(&ring->queue, skb); + + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); + + rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE); + + return true; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h index 53d0e0a5af5c..803adcc80c96 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h @@ -40,470 +40,494 @@ #define USB_HWDESC_HEADER_LEN 32 #define CRCLENGTH 4 +/* Define a macro that takes a le32 word, converts it to host ordering, + * right shifts by a specified count, creates a mask of the specified + * bit count, and extracts that number of bits. + */ + +#define SHIFT_AND_MASK_LE(__pdesc, __shift, __mask) \ + ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \ + BIT_LEN_MASK_32(__mask)) + +/* Define a macro that clears a bit field in an le32 word and + * sets the specified value into that bit field. The resulting + * value remains in le32 ordering; however, it is properly converted + * to host ordering for the clear and set operations before conversion + * back to le32. 
+ */ + +#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \ + (*(__le32 *)(__pdesc) = \ + (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \ + (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \ + (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift))))); + +/* macros to read/write various fields in RX or TX descriptors */ + #define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val) + SET_BITS_OFFSET_LE(__pdesc, 0, 16, __val) #define SET_TX_DESC_OFFSET(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val) + SET_BITS_OFFSET_LE(__pdesc, 16, 8, __val) #define SET_TX_DESC_BMC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 24, 1, __val) #define SET_TX_DESC_HTC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 25, 1, __val) #define SET_TX_DESC_LAST_SEG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 26, 1, __val) #define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 27, 1, __val) #define SET_TX_DESC_LINIP(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 28, 1, __val) #define SET_TX_DESC_NO_ACM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 29, 1, __val) #define SET_TX_DESC_GF(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val) #define SET_TX_DESC_OWN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val) #define GET_TX_DESC_PKT_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 0, 16) + SHIFT_AND_MASK_LE(__pdesc, 0, 16) #define GET_TX_DESC_OFFSET(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 16, 8) + SHIFT_AND_MASK_LE(__pdesc, 16, 8) #define GET_TX_DESC_BMC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 24, 1) + SHIFT_AND_MASK_LE(__pdesc, 24, 1) #define GET_TX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 25, 1) + SHIFT_AND_MASK_LE(__pdesc, 25, 1) #define GET_TX_DESC_LAST_SEG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 26, 1) + SHIFT_AND_MASK_LE(__pdesc, 26, 1) #define GET_TX_DESC_FIRST_SEG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 27, 1) + SHIFT_AND_MASK_LE(__pdesc, 27, 1) #define GET_TX_DESC_LINIP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 28, 1) + SHIFT_AND_MASK_LE(__pdesc, 28, 1) #define GET_TX_DESC_NO_ACM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 29, 1) + SHIFT_AND_MASK_LE(__pdesc, 29, 1) #define GET_TX_DESC_GF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 30, 1) + SHIFT_AND_MASK_LE(__pdesc, 30, 1) #define GET_TX_DESC_OWN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 31, 1) + SHIFT_AND_MASK_LE(__pdesc, 31, 1) #define SET_TX_DESC_MACID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 0, 5, __val) #define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 5, 1, __val) #define SET_TX_DESC_BK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 6, 1, __val) #define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 7, 1, __val) #define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 8, 5, __val) #define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val) + 
SET_BITS_OFFSET_LE(__pdesc+4, 13, 1, __val) #define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 14, 1, __val) #define SET_TX_DESC_PIFS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 15, 1, __val) #define SET_TX_DESC_RATE_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 16, 4, __val) #define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 20, 1, __val) #define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 21, 1, __val) #define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 22, 2, __val) #define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val) + SET_BITS_OFFSET_LE(__pdesc+4, 24, 8, __val) #define GET_TX_DESC_MACID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) + SHIFT_AND_MASK_LE(__pdesc+4, 0, 5) #define GET_TX_DESC_AGG_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 5, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 5, 1) #define GET_TX_DESC_AGG_BREAK(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 6, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 6, 1) #define GET_TX_DESC_RDG_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 7, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 7, 1) #define GET_TX_DESC_QUEUE_SEL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 8, 5) + SHIFT_AND_MASK_LE(__pdesc+4, 8, 5) #define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 13, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 13, 1) #define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 14, 1) #define GET_TX_DESC_PIFS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 15, 1) #define GET_TX_DESC_RATE_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 16, 4) + SHIFT_AND_MASK_LE(__pdesc+4, 16, 4) #define GET_TX_DESC_NAV_USE_HDR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 20, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 20, 1) #define GET_TX_DESC_EN_DESC_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 21, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 21, 1) #define GET_TX_DESC_SEC_TYPE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 22, 2) + SHIFT_AND_MASK_LE(__pdesc+4, 22, 2) #define GET_TX_DESC_PKT_OFFSET(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 24, 8) + SHIFT_AND_MASK_LE(__pdesc+4, 24, 8) #define SET_TX_DESC_RTS_RC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 0, 6, __val) #define SET_TX_DESC_DATA_RC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 6, 6, __val) #define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 14, 2, __val) #define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 17, 1, __val) #define SET_TX_DESC_RAW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 18, 1, __val) #define SET_TX_DESC_CCX(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 19, 1, __val) #define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 20, 3, __val) #define SET_TX_DESC_ANTSEL_A(__pdesc, 
__val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 24, 1, __val) #define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 25, 1, __val) #define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 26, 2, __val) #define SET_TX_DESC_TX_ANTL(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 28, 2, __val) #define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+8, 30, 2, __val) #define GET_TX_DESC_RTS_RC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 0, 6) + SHIFT_AND_MASK_LE(__pdesc+8, 0, 6) #define GET_TX_DESC_DATA_RC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 6, 6) + SHIFT_AND_MASK_LE(__pdesc+8, 6, 6) #define GET_TX_DESC_BAR_RTY_TH(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 14, 2) + SHIFT_AND_MASK_LE(__pdesc+8, 14, 2) #define GET_TX_DESC_MORE_FRAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 17, 1) + SHIFT_AND_MASK_LE(__pdesc+8, 17, 1) #define GET_TX_DESC_RAW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 18, 1) + SHIFT_AND_MASK_LE(__pdesc+8, 18, 1) #define GET_TX_DESC_CCX(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 19, 1) + SHIFT_AND_MASK_LE(__pdesc+8, 19, 1) #define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 20, 3) + SHIFT_AND_MASK_LE(__pdesc+8, 20, 3) #define GET_TX_DESC_ANTSEL_A(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 24, 1) + SHIFT_AND_MASK_LE(__pdesc+8, 24, 1) #define GET_TX_DESC_ANTSEL_B(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 25, 1) + SHIFT_AND_MASK_LE(__pdesc+8, 25, 1) #define GET_TX_DESC_TX_ANT_CCK(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 26, 2) + SHIFT_AND_MASK_LE(__pdesc+8, 26, 2) #define GET_TX_DESC_TX_ANTL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 28, 2) + SHIFT_AND_MASK_LE(__pdesc+8, 28, 2) #define GET_TX_DESC_TX_ANT_HT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 30, 2) + SHIFT_AND_MASK_LE(__pdesc+8, 30, 2) #define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val) + SET_BITS_OFFSET_LE(__pdesc+12, 0, 8, __val) #define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val) + SET_BITS_OFFSET_LE(__pdesc+12, 8, 8, __val) #define SET_TX_DESC_SEQ(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val) + SET_BITS_OFFSET_LE(__pdesc+12, 16, 12, __val) #define SET_TX_DESC_PKT_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+12, 28, 4, __val) #define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 0, 8) + SHIFT_AND_MASK_LE(__pdesc+12, 0, 8) #define GET_TX_DESC_TAIL_PAGE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 8, 8) + SHIFT_AND_MASK_LE(__pdesc+12, 8, 8) #define GET_TX_DESC_SEQ(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 16, 12) + SHIFT_AND_MASK_LE(__pdesc+12, 16, 12) #define GET_TX_DESC_PKT_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 28, 4) + SHIFT_AND_MASK_LE(__pdesc+12, 28, 4) #define SET_TX_DESC_RTS_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 0, 5, __val) #define SET_TX_DESC_AP_DCFE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 5, 1, __val) #define SET_TX_DESC_QOS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 6, 1, __val) #define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \ - 
SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 7, 1, __val) #define SET_TX_DESC_USE_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 8, 1, __val) #define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 9, 1, __val) #define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 10, 1, __val) #define SET_TX_DESC_CTS2SELF(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 11, 1, __val) #define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 12, 1, __val) #define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 13, 1, __val) #define SET_TX_DESC_PORT_ID(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 14, 1, __val) #define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 18, 1, __val) #define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 19, 1, __val) #define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 20, 2, __val) #define SET_TX_DESC_TX_STBC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 22, 2, __val) #define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 24, 1, __val) #define SET_TX_DESC_DATA_BW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 25, 1, __val) #define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 26, 1, __val) #define SET_TX_DESC_RTS_BW(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 27, 1, __val) #define SET_TX_DESC_RTS_SC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 28, 2, __val) #define SET_TX_DESC_RTS_STBC(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val) + SET_BITS_OFFSET_LE(__pdesc+16, 30, 2, __val) #define GET_TX_DESC_RTS_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 0, 5) + SHIFT_AND_MASK_LE(__pdesc+16, 0, 5) #define GET_TX_DESC_AP_DCFE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 5, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 5, 1) #define GET_TX_DESC_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 6, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 6, 1) #define GET_TX_DESC_HWSEQ_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 7, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 7, 1) #define GET_TX_DESC_USE_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 8, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 8, 1) #define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 9, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 9, 1) #define GET_TX_DESC_DISABLE_FB(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 10, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 10, 1) #define GET_TX_DESC_CTS2SELF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 11, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 11, 1) #define GET_TX_DESC_RTS_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 12, 1) + 
SHIFT_AND_MASK_LE(__pdesc+16, 12, 1) #define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 13, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 13, 1) #define GET_TX_DESC_PORT_ID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 14, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 14, 1) #define GET_TX_DESC_WAIT_DCTS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 18, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 18, 1) #define GET_TX_DESC_CTS2AP_EN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 19, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 19, 1) #define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 20, 2) + SHIFT_AND_MASK_LE(__pdesc+16, 20, 2) #define GET_TX_DESC_TX_STBC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 22, 2) + SHIFT_AND_MASK_LE(__pdesc+16, 22, 2) #define GET_TX_DESC_DATA_SHORT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 24, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 24, 1) #define GET_TX_DESC_DATA_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 25, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 25, 1) #define GET_TX_DESC_RTS_SHORT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 26, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 26, 1) #define GET_TX_DESC_RTS_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 27, 1) + SHIFT_AND_MASK_LE(__pdesc+16, 27, 1) #define GET_TX_DESC_RTS_SC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 28, 2) + SHIFT_AND_MASK_LE(__pdesc+16, 28, 2) #define GET_TX_DESC_RTS_STBC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 30, 2) + SHIFT_AND_MASK_LE(__pdesc+16, 30, 2) #define SET_TX_DESC_TX_RATE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val) + SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val) #define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val) #define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val) #define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val) + SET_BITS_OFFSET_LE(__pdesc+20, 8, 5, __val) #define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+20, 13, 4, __val) #define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+20, 17, 1, __val) #define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val) + SET_BITS_OFFSET_LE(__pdesc+20, 18, 6, __val) #define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val) + SET_BITS_OFFSET_LE(__pdesc+20, 24, 8, __val) #define GET_TX_DESC_TX_RATE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 0, 6) + SHIFT_AND_MASK_LE(__pdesc+20, 0, 6) #define GET_TX_DESC_DATA_SHORTGI(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 6, 1) + SHIFT_AND_MASK_LE(__pdesc+20, 6, 1) #define GET_TX_DESC_CCX_TAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 7, 1) + SHIFT_AND_MASK_LE(__pdesc+20, 7, 1) #define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 8, 5) + SHIFT_AND_MASK_LE(__pdesc+20, 8, 5) #define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 13, 4) + SHIFT_AND_MASK_LE(__pdesc+20, 13, 4) #define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 17, 1) + SHIFT_AND_MASK_LE(__pdesc+20, 17, 1) #define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 18, 6) + SHIFT_AND_MASK_LE(__pdesc+20, 18, 6) #define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \ - 
LE_BITS_TO_4BYTE(__pdesc+20, 24, 8) + SHIFT_AND_MASK_LE(__pdesc+20, 24, 8) #define SET_TX_DESC_TXAGC_A(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 0, 5, __val) #define SET_TX_DESC_TXAGC_B(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 5, 5, __val) #define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 10, 1, __val) #define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 11, 5, __val) #define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 16, 4, __val) #define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 20, 4, __val) #define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 24, 4, __val) #define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 28, 4, __val) #define GET_TX_DESC_TXAGC_A(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 0, 5) + SHIFT_AND_MASK_LE(__pdesc+24, 0, 5) #define GET_TX_DESC_TXAGC_B(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 5, 5) + SHIFT_AND_MASK_LE(__pdesc+24, 5, 5) #define GET_TX_DESC_USE_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 10, 1) + SHIFT_AND_MASK_LE(__pdesc+24, 10, 1) #define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 11, 5) + SHIFT_AND_MASK_LE(__pdesc+24, 11, 5) #define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 16, 4) + SHIFT_AND_MASK_LE(__pdesc+24, 16, 4) #define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 20, 4) + SHIFT_AND_MASK_LE(__pdesc+24, 20, 4) #define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 24, 4) + SHIFT_AND_MASK_LE(__pdesc+24, 24, 4) #define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 28, 4) + SHIFT_AND_MASK_LE(__pdesc+24, 28, 4) #define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val) + SET_BITS_OFFSET_LE(__pdesc+28, 0, 16, __val) #define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+28, 16, 4, __val) #define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+28, 20, 4, __val) #define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+28, 24, 4, __val) #define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val) + SET_BITS_OFFSET_LE(__pdesc+28, 28, 4, __val) #define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 16) + SHIFT_AND_MASK_LE(__pdesc+28, 0, 16) #define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 16, 4) + SHIFT_AND_MASK_LE(__pdesc+28, 16, 4) #define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 20, 4) + SHIFT_AND_MASK_LE(__pdesc+28, 20, 4) #define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 24, 4) + SHIFT_AND_MASK_LE(__pdesc+28, 24, 4) #define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 28, 4) + SHIFT_AND_MASK_LE(__pdesc+28, 
28, 4) #define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val) + SET_BITS_OFFSET_LE(__pdesc+32, 0, 32, __val) #define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val) + SET_BITS_OFFSET_LE(__pdesc+36, 0, 32, __val) #define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+32, 0, 32) + SHIFT_AND_MASK_LE(__pdesc+32, 0, 32) #define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+36, 0, 32) + SHIFT_AND_MASK_LE(__pdesc+36, 0, 32) #define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val) + SET_BITS_OFFSET_LE(__pdesc+40, 0, 32, __val) #define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val) + SET_BITS_OFFSET_LE(__pdesc+44, 0, 32, __val) #define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+40, 0, 32) + SHIFT_AND_MASK_LE(__pdesc+40, 0, 32) #define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+44, 0, 32) + SHIFT_AND_MASK_LE(__pdesc+44, 0, 32) #define GET_RX_DESC_PKT_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 0, 14) + SHIFT_AND_MASK_LE(__pdesc, 0, 14) #define GET_RX_DESC_CRC32(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 14, 1) + SHIFT_AND_MASK_LE(__pdesc, 14, 1) #define GET_RX_DESC_ICV(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 15, 1) + SHIFT_AND_MASK_LE(__pdesc, 15, 1) #define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 16, 4) + SHIFT_AND_MASK_LE(__pdesc, 16, 4) #define GET_RX_DESC_SECURITY(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 20, 3) + SHIFT_AND_MASK_LE(__pdesc, 20, 3) #define GET_RX_DESC_QOS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 23, 1) + SHIFT_AND_MASK_LE(__pdesc, 23, 1) #define GET_RX_DESC_SHIFT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 24, 2) + SHIFT_AND_MASK_LE(__pdesc, 24, 2) #define GET_RX_DESC_PHYST(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 26, 1) + SHIFT_AND_MASK_LE(__pdesc, 26, 1) #define GET_RX_DESC_SWDEC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 27, 1) + SHIFT_AND_MASK_LE(__pdesc, 27, 1) #define GET_RX_DESC_LS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 28, 1) + SHIFT_AND_MASK_LE(__pdesc, 28, 1) #define GET_RX_DESC_FS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 29, 1) + SHIFT_AND_MASK_LE(__pdesc, 29, 1) #define GET_RX_DESC_EOR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 30, 1) + SHIFT_AND_MASK_LE(__pdesc, 30, 1) #define GET_RX_DESC_OWN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc, 31, 1) + SHIFT_AND_MASK_LE(__pdesc, 31, 1) #define SET_RX_DESC_PKT_LEN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val) + SET_BITS_OFFSET_LE(__pdesc, 0, 14, __val) #define SET_RX_DESC_EOR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 30, 1, __val) #define SET_RX_DESC_OWN(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val) + SET_BITS_OFFSET_LE(__pdesc, 31, 1, __val) #define GET_RX_DESC_MACID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 0, 5) + SHIFT_AND_MASK_LE(__pdesc+4, 0, 5) #define GET_RX_DESC_TID(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 5, 4) + SHIFT_AND_MASK_LE(__pdesc+4, 5, 4) #define GET_RX_DESC_HWRSVD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 9, 5) + SHIFT_AND_MASK_LE(__pdesc+4, 9, 5) #define GET_RX_DESC_PAGGR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 14, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 14, 1) #define GET_RX_DESC_FAGGR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 15, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 15, 1) #define GET_RX_DESC_A1_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 16, 
4) + SHIFT_AND_MASK_LE(__pdesc+4, 16, 4) #define GET_RX_DESC_A2_FIT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 20, 4) + SHIFT_AND_MASK_LE(__pdesc+4, 20, 4) #define GET_RX_DESC_PAM(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 24, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 24, 1) #define GET_RX_DESC_PWR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 25, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 25, 1) #define GET_RX_DESC_MD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 26, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 26, 1) #define GET_RX_DESC_MF(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 27, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 27, 1) #define GET_RX_DESC_TYPE(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 28, 2) + SHIFT_AND_MASK_LE(__pdesc+4, 28, 2) #define GET_RX_DESC_MC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 30, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 30, 1) #define GET_RX_DESC_BC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+4, 31, 1) + SHIFT_AND_MASK_LE(__pdesc+4, 31, 1) #define GET_RX_DESC_SEQ(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 0, 12) + SHIFT_AND_MASK_LE(__pdesc+8, 0, 12) #define GET_RX_DESC_FRAG(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 12, 4) + SHIFT_AND_MASK_LE(__pdesc+8, 12, 4) #define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 16, 14) + SHIFT_AND_MASK_LE(__pdesc+8, 16, 14) #define GET_RX_DESC_NEXT_IND(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 30, 1) + SHIFT_AND_MASK_LE(__pdesc+8, 30, 1) #define GET_RX_DESC_RSVD(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+8, 31, 1) + SHIFT_AND_MASK_LE(__pdesc+8, 31, 1) #define GET_RX_DESC_RXMCS(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 0, 6) + SHIFT_AND_MASK_LE(__pdesc+12, 0, 6) #define GET_RX_DESC_RXHT(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 6, 1) + SHIFT_AND_MASK_LE(__pdesc+12, 6, 1) #define GET_RX_DESC_SPLCP(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 8, 1) + SHIFT_AND_MASK_LE(__pdesc+12, 8, 1) #define GET_RX_DESC_BW(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 9, 1) + SHIFT_AND_MASK_LE(__pdesc+12, 9, 1) #define GET_RX_DESC_HTC(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 10, 1) + SHIFT_AND_MASK_LE(__pdesc+12, 10, 1) #define GET_RX_DESC_HWPC_ERR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 14, 1) + SHIFT_AND_MASK_LE(__pdesc+12, 14, 1) #define GET_RX_DESC_HWPC_IND(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 15, 1) + SHIFT_AND_MASK_LE(__pdesc+12, 15, 1) #define GET_RX_DESC_IV0(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+12, 16, 16) + SHIFT_AND_MASK_LE(__pdesc+12, 16, 16) #define GET_RX_DESC_IV1(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+16, 0, 32) + SHIFT_AND_MASK_LE(__pdesc+16, 0, 32) #define GET_RX_DESC_TSFL(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+20, 0, 32) + SHIFT_AND_MASK_LE(__pdesc+20, 0, 32) #define GET_RX_DESC_BUFF_ADDR(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+24, 0, 32) + SHIFT_AND_MASK_LE(__pdesc+24, 0, 32) #define GET_RX_DESC_BUFF_ADDR64(__pdesc) \ - LE_BITS_TO_4BYTE(__pdesc+28, 0, 32) + SHIFT_AND_MASK_LE(__pdesc+28, 0, 32) #define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val) + SET_BITS_OFFSET_LE(__pdesc+24, 0, 32, __val) #define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \ - SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val) + SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val) #define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ do { \ @@ -711,4 +735,6 @@ void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue); void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool b_firstseg, bool b_lastseg, struct sk_buff *skb); +bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); + #endif diff --git 
a/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile new file mode 100644 index 000000000000..ad2de6b839ef --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/Makefile @@ -0,0 +1,14 @@ +rtl8192cu-objs := \ + dm.o \ + hw.o \ + led.o \ + mac.o \ + phy.o \ + rf.o \ + sw.o \ + table.o \ + trx.o + +obj-$(CONFIG_RTL8192CU) += rtl8192cu.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h new file mode 100644 index 000000000000..c54940ea72fe --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h @@ -0,0 +1,62 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../rtl8192ce/def.h" + +/*------------------------------------------------------------------------- + * Chip specific + *-------------------------------------------------------------------------*/ +#define CHIP_8723 BIT(2) /* RTL8723 With BT feature */ +#define CHIP_8723_DRV_REV BIT(3) /* RTL8723 Driver Revised */ +#define NORMAL_CHIP BIT(4) +#define CHIP_VENDOR_UMC BIT(5) +#define CHIP_VENDOR_UMC_B_CUT BIT(6) + +#define IS_NORMAL_CHIP(version) \ + (((version) & NORMAL_CHIP) ? true : false) + +#define IS_8723_SERIES(version) \ + (((version) & CHIP_8723) ? true : false) + +#define IS_92C_1T2R(version) \ + (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R)) + +#define IS_VENDOR_UMC(version) \ + (((version) & CHIP_VENDOR_UMC) ? true : false) + +#define IS_VENDOR_UMC_A_CUT(version) \ + (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6) | BIT(7))) ? \ + false : true) : false) + +#define IS_VENDOR_8723_A_CUT(version) \ + (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \ + false : true) : false) + +#define CHIP_BONDING_92C_1T2R 0x1 +#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3) diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c new file mode 100644 index 000000000000..f311baee668d --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.c @@ -0,0 +1,113 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../base.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "dm.h" + +void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + long undecorated_smoothed_pwdb; + + if (!rtlpriv->dm.dynamic_txpower_enable) + return; + + if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) { + rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; + return; + } + + if ((mac->link_state < MAC80211_LINKED) && + (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { + RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, + ("Not connected to any\n")); + + rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; + + rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL; + return; + } + + if (mac->link_state >= MAC80211_LINKED) { + if (mac->opmode == NL80211_IFTYPE_ADHOC) { + undecorated_smoothed_pwdb = + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + ("AP Client PWDB = 0x%lx\n", + undecorated_smoothed_pwdb)); + } else { + undecorated_smoothed_pwdb = + rtlpriv->dm.undecorated_smoothed_pwdb; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + ("STA Default Port PWDB = 0x%lx\n", + undecorated_smoothed_pwdb)); + } + } else { + undecorated_smoothed_pwdb = + rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; + + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + ("AP Ext Port PWDB = 0x%lx\n", + undecorated_smoothed_pwdb)); + } + + if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) { + rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n")); + } else if ((undecorated_smoothed_pwdb < + (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) && + (undecorated_smoothed_pwdb >= + TX_POWER_NEAR_FIELD_THRESH_LVL1)) { + + rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n")); + } else if (undecorated_smoothed_pwdb < + (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) { + rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL; + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + ("TXHIGHPWRLEVEL_NORMAL\n")); + } + + if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, + ("PHY_SetTxPowerLevel8192S() Channel = %d\n", + rtlphy->current_channel)); + 
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel); + } + + rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h new file mode 100644 index 000000000000..7f966c666b5a --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/dm.h @@ -0,0 +1,32 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../rtl8192ce/dm.h" + +void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c new file mode 100644 index 000000000000..9444e76838cf --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -0,0 +1,2504 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../efuse.h" +#include "../base.h" +#include "../cam.h" +#include "../ps.h" +#include "../usb.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "mac.h" +#include "dm.h" +#include "hw.h" +#include "trx.h" +#include "led.h" +#include "table.h" + +static void _rtl92cu_phy_param_tab_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + + rtlphy->hwparam_tables[MAC_REG].length = RTL8192CUMAC_2T_ARRAYLENGTH; + rtlphy->hwparam_tables[MAC_REG].pdata = RTL8192CUMAC_2T_ARRAY; + if (IS_HIGHT_PA(rtlefuse->board_type)) { + rtlphy->hwparam_tables[PHY_REG_PG].length = + RTL8192CUPHY_REG_Array_PG_HPLength; + rtlphy->hwparam_tables[PHY_REG_PG].pdata = + RTL8192CUPHY_REG_Array_PG_HP; + } else { + rtlphy->hwparam_tables[PHY_REG_PG].length = + RTL8192CUPHY_REG_ARRAY_PGLENGTH; + rtlphy->hwparam_tables[PHY_REG_PG].pdata = + RTL8192CUPHY_REG_ARRAY_PG; + } + /* 2T */ + rtlphy->hwparam_tables[PHY_REG_2T].length = + RTL8192CUPHY_REG_2TARRAY_LENGTH; + rtlphy->hwparam_tables[PHY_REG_2T].pdata = + RTL8192CUPHY_REG_2TARRAY; + rtlphy->hwparam_tables[RADIOA_2T].length = + RTL8192CURADIOA_2TARRAYLENGTH; + rtlphy->hwparam_tables[RADIOA_2T].pdata = + RTL8192CURADIOA_2TARRAY; + rtlphy->hwparam_tables[RADIOB_2T].length = + RTL8192CURADIOB_2TARRAYLENGTH; + rtlphy->hwparam_tables[RADIOB_2T].pdata = + RTL8192CU_RADIOB_2TARRAY; + rtlphy->hwparam_tables[AGCTAB_2T].length = + RTL8192CUAGCTAB_2TARRAYLENGTH; + rtlphy->hwparam_tables[AGCTAB_2T].pdata = + RTL8192CUAGCTAB_2TARRAY; + /* 1T */ + if (IS_HIGHT_PA(rtlefuse->board_type)) { + rtlphy->hwparam_tables[PHY_REG_1T].length = + RTL8192CUPHY_REG_1T_HPArrayLength; + rtlphy->hwparam_tables[PHY_REG_1T].pdata = + RTL8192CUPHY_REG_1T_HPArray; + rtlphy->hwparam_tables[RADIOA_1T].length = + RTL8192CURadioA_1T_HPArrayLength; + rtlphy->hwparam_tables[RADIOA_1T].pdata = + RTL8192CURadioA_1T_HPArray; + rtlphy->hwparam_tables[RADIOB_1T].length = + RTL8192CURADIOB_1TARRAYLENGTH; + rtlphy->hwparam_tables[RADIOB_1T].pdata = + RTL8192CU_RADIOB_1TARRAY; + rtlphy->hwparam_tables[AGCTAB_1T].length = + RTL8192CUAGCTAB_1T_HPArrayLength; + rtlphy->hwparam_tables[AGCTAB_1T].pdata = + Rtl8192CUAGCTAB_1T_HPArray; + } else { + rtlphy->hwparam_tables[PHY_REG_1T].length = + RTL8192CUPHY_REG_1TARRAY_LENGTH; + rtlphy->hwparam_tables[PHY_REG_1T].pdata = + RTL8192CUPHY_REG_1TARRAY; + rtlphy->hwparam_tables[RADIOA_1T].length = + RTL8192CURADIOA_1TARRAYLENGTH; + rtlphy->hwparam_tables[RADIOA_1T].pdata = + RTL8192CU_RADIOA_1TARRAY; + rtlphy->hwparam_tables[RADIOB_1T].length = + RTL8192CURADIOB_1TARRAYLENGTH; + rtlphy->hwparam_tables[RADIOB_1T].pdata = + RTL8192CU_RADIOB_1TARRAY; + rtlphy->hwparam_tables[AGCTAB_1T].length = + RTL8192CUAGCTAB_1TARRAYLENGTH; + rtlphy->hwparam_tables[AGCTAB_1T].pdata = + RTL8192CUAGCTAB_1TARRAY; + } +} + +static void _rtl92cu_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, + bool autoload_fail, + u8 *hwinfo) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 rf_path, index, tempval; + u16 i; + + for (rf_path = 0; rf_path < 2; rf_path++) { + for (i = 0; i < 3; i++) { + if (!autoload_fail) { + rtlefuse-> + eeprom_chnlarea_txpwr_cck[rf_path][i] = + hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i]; + rtlefuse-> + 
eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] = + hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 + + i]; + } else { + rtlefuse-> + eeprom_chnlarea_txpwr_cck[rf_path][i] = + EEPROM_DEFAULT_TXPOWERLEVEL; + rtlefuse-> + eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] = + EEPROM_DEFAULT_TXPOWERLEVEL; + } + } + } + for (i = 0; i < 3; i++) { + if (!autoload_fail) + tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i]; + else + tempval = EEPROM_DEFAULT_HT40_2SDIFF; + rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] = + (tempval & 0xf); + rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] = + ((tempval & 0xf0) >> 4); + } + for (rf_path = 0; rf_path < 2; rf_path++) + for (i = 0; i < 3; i++) + RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path, + i, rtlefuse-> + eeprom_chnlarea_txpwr_cck[rf_path][i])); + for (rf_path = 0; rf_path < 2; rf_path++) + for (i = 0; i < 3; i++) + RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n", + rf_path, i, + rtlefuse-> + eeprom_chnlarea_txpwr_ht40_1s[rf_path][i])); + for (rf_path = 0; rf_path < 2; rf_path++) + for (i = 0; i < 3; i++) + RTPRINT(rtlpriv, FINIT, INIT_EEPROM, + ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n", + rf_path, i, + rtlefuse-> + eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path] + [i])); + for (rf_path = 0; rf_path < 2; rf_path++) { + for (i = 0; i < 14; i++) { + index = _rtl92c_get_chnl_group((u8) i); + rtlefuse->txpwrlevel_cck[rf_path][i] = + rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index]; + rtlefuse->txpwrlevel_ht40_1s[rf_path][i] = + rtlefuse-> + eeprom_chnlarea_txpwr_ht40_1s[rf_path][index]; + if ((rtlefuse-> + eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] - + rtlefuse-> + eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index]) + > 0) { + rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = + rtlefuse-> + eeprom_chnlarea_txpwr_ht40_1s[rf_path] + [index] - rtlefuse-> + eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path] + [index]; + } else { + rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0; + } + } + for (i = 0; i < 14; i++) { + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = " + "[0x%x / 0x%x / 0x%x]\n", rf_path, i, + rtlefuse->txpwrlevel_cck[rf_path][i], + rtlefuse->txpwrlevel_ht40_1s[rf_path][i], + rtlefuse->txpwrlevel_ht40_2s[rf_path][i])); + } + } + for (i = 0; i < 3; i++) { + if (!autoload_fail) { + rtlefuse->eeprom_pwrlimit_ht40[i] = + hwinfo[EEPROM_TXPWR_GROUP + i]; + rtlefuse->eeprom_pwrlimit_ht20[i] = + hwinfo[EEPROM_TXPWR_GROUP + 3 + i]; + } else { + rtlefuse->eeprom_pwrlimit_ht40[i] = 0; + rtlefuse->eeprom_pwrlimit_ht20[i] = 0; + } + } + for (rf_path = 0; rf_path < 2; rf_path++) { + for (i = 0; i < 14; i++) { + index = _rtl92c_get_chnl_group((u8) i); + if (rf_path == RF90_PATH_A) { + rtlefuse->pwrgroup_ht20[rf_path][i] = + (rtlefuse->eeprom_pwrlimit_ht20[index] + & 0xf); + rtlefuse->pwrgroup_ht40[rf_path][i] = + (rtlefuse->eeprom_pwrlimit_ht40[index] + & 0xf); + } else if (rf_path == RF90_PATH_B) { + rtlefuse->pwrgroup_ht20[rf_path][i] = + ((rtlefuse->eeprom_pwrlimit_ht20[index] + & 0xf0) >> 4); + rtlefuse->pwrgroup_ht40[rf_path][i] = + ((rtlefuse->eeprom_pwrlimit_ht40[index] + & 0xf0) >> 4); + } + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("RF-%d pwrgroup_ht20[%d] = 0x%x\n", + rf_path, i, + rtlefuse->pwrgroup_ht20[rf_path][i])); + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("RF-%d pwrgroup_ht40[%d] = 0x%x\n", + rf_path, i, + rtlefuse->pwrgroup_ht40[rf_path][i])); + } + } + for (i = 0; i < 14; i++) { + index = _rtl92c_get_chnl_group((u8) i); + if 
(!autoload_fail) + tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index]; + else + tempval = EEPROM_DEFAULT_HT20_DIFF; + rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF); + rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] = + ((tempval >> 4) & 0xF); + if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3)) + rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0; + if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3)) + rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0; + index = _rtl92c_get_chnl_group((u8) i); + if (!autoload_fail) + tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index]; + else + tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF; + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF); + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] = + ((tempval >> 4) & 0xF); + } + rtlefuse->legacy_ht_txpowerdiff = + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7]; + for (i = 0; i < 14; i++) + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i, + rtlefuse->txpwr_ht20diff[RF90_PATH_A][i])); + for (i = 0; i < 14; i++) + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i, + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i])); + for (i = 0; i < 14; i++) + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i, + rtlefuse->txpwr_ht20diff[RF90_PATH_B][i])); + for (i = 0; i < 14; i++) + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i, + rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i])); + if (!autoload_fail) + rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7); + else + rtlefuse->eeprom_regulatory = 0; + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory)); + if (!autoload_fail) { + rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A]; + rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B]; + } else { + rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI; + rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI; + } + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("TSSI_A = 0x%x, TSSI_B = 0x%x\n", + rtlefuse->eeprom_tssi[RF90_PATH_A], + rtlefuse->eeprom_tssi[RF90_PATH_B])); + if (!autoload_fail) + tempval = hwinfo[EEPROM_THERMAL_METER]; + else + tempval = EEPROM_DEFAULT_THERMALMETER; + rtlefuse->eeprom_thermalmeter = (tempval & 0x1f); + if (rtlefuse->eeprom_thermalmeter < 0x06 || + rtlefuse->eeprom_thermalmeter > 0x1c) + rtlefuse->eeprom_thermalmeter = 0x12; + if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail) + rtlefuse->apk_thermalmeterignore = true; + rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter; + RTPRINT(rtlpriv, FINIT, INIT_TxPower, + ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter)); +} + +static void _rtl92cu_read_board_type(struct ieee80211_hw *hw, u8 *contents) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 boardType; + + if (IS_NORMAL_CHIP(rtlhal->version)) { + boardType = ((contents[EEPROM_RF_OPT1]) & + BOARD_TYPE_NORMAL_MASK) >> 5; /*bit[7:5]*/ + } else { + boardType = contents[EEPROM_RF_OPT4]; + boardType &= BOARD_TYPE_TEST_MASK; + } + rtlefuse->board_type = boardType; + if (IS_HIGHT_PA(rtlefuse->board_type)) + rtlefuse->external_pa = 1; + printk(KERN_INFO "rtl8192cu: Board Type %x\n", rtlefuse->board_type); + +#ifdef CONFIG_ANTENNA_DIVERSITY + /* Antenna Diversity setting. 
*/ + if (registry_par->antdiv_cfg == 2) /* 2: From Efuse */ + rtl_efuse->antenna_cfg = (contents[EEPROM_RF_OPT1]&0x18)>>3; + else + rtl_efuse->antenna_cfg = registry_par->antdiv_cfg; /* 0:OFF, */ + + printk(KERN_INFO "rtl8192cu: Antenna Config %x\n", + rtl_efuse->antenna_cfg); +#endif +} + +#ifdef CONFIG_BT_COEXIST +static void _update_bt_param(_adapter *padapter) +{ + struct btcoexist_priv *pbtpriv = &(padapter->halpriv.bt_coexist); + struct registry_priv *registry_par = &padapter->registrypriv; + if (2 != registry_par->bt_iso) { + /* 0:Low, 1:High, 2:From Efuse */ + pbtpriv->BT_Ant_isolation = registry_par->bt_iso; + } + if (registry_par->bt_sco == 1) { + /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter, 4.Busy, + * 5.OtherBusy */ + pbtpriv->BT_Service = BT_OtherAction; + } else if (registry_par->bt_sco == 2) { + pbtpriv->BT_Service = BT_SCO; + } else if (registry_par->bt_sco == 4) { + pbtpriv->BT_Service = BT_Busy; + } else if (registry_par->bt_sco == 5) { + pbtpriv->BT_Service = BT_OtherBusy; + } else { + pbtpriv->BT_Service = BT_Idle; + } + pbtpriv->BT_Ampdu = registry_par->bt_ampdu; + pbtpriv->bCOBT = _TRUE; + pbtpriv->BtEdcaUL = 0; + pbtpriv->BtEdcaDL = 0; + pbtpriv->BtRssiState = 0xff; + pbtpriv->bInitSet = _FALSE; + pbtpriv->bBTBusyTraffic = _FALSE; + pbtpriv->bBTTrafficModeSet = _FALSE; + pbtpriv->bBTNonTrafficModeSet = _FALSE; + pbtpriv->CurrentState = 0; + pbtpriv->PreviousState = 0; + printk(KERN_INFO "rtl8192cu: BT Coexistance = %s\n", + (pbtpriv->BT_Coexist == _TRUE) ? "enable" : "disable"); + if (pbtpriv->BT_Coexist) { + if (pbtpriv->BT_Ant_Num == Ant_x2) + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "Ant_Num = Antx2\n"); + else if (pbtpriv->BT_Ant_Num == Ant_x1) + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "Ant_Num = Antx1\n"); + switch (pbtpriv->BT_CoexistType) { + case BT_2Wire: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "CoexistType = BT_2Wire\n"); + break; + case BT_ISSC_3Wire: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "CoexistType = BT_ISSC_3Wire\n"); + break; + case BT_Accel: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "CoexistType = BT_Accel\n"); + break; + case BT_CSR_BC4: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "CoexistType = BT_CSR_BC4\n"); + break; + case BT_CSR_BC8: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "CoexistType = BT_CSR_BC8\n"); + break; + case BT_RTL8756: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "CoexistType = BT_RTL8756\n"); + break; + default: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_" + "CoexistType = Unknown\n"); + break; + } + printk(KERN_INFO "rtl8192cu: BlueTooth BT_Ant_isolation = %d\n", + pbtpriv->BT_Ant_isolation); + switch (pbtpriv->BT_Service) { + case BT_OtherAction: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = " + "BT_OtherAction\n"); + break; + case BT_SCO: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = " + "BT_SCO\n"); + break; + case BT_Busy: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = " + "BT_Busy\n"); + break; + case BT_OtherBusy: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = " + "BT_OtherBusy\n"); + break; + default: + printk(KERN_INFO "rtl8192cu: BlueTooth BT_Service = " + "BT_Idle\n"); + break; + } + printk(KERN_INFO "rtl8192cu: BT_RadioSharedType = 0x%x\n", + pbtpriv->BT_RadioSharedType); + } +} + +#define GET_BT_COEXIST(priv) (&priv->bt_coexist) + +static void _rtl92cu_read_bluetooth_coexistInfo(struct ieee80211_hw *hw, + u8 *contents, + bool bautoloadfailed); +{ + HAL_DATA_TYPE *pHalData = GET_HAL_DATA(Adapter); + bool isNormal = 
IS_NORMAL_CHIP(pHalData->VersionID); + struct btcoexist_priv *pbtpriv = &pHalData->bt_coexist; + u8 rf_opt4; + + _rtw_memset(pbtpriv, 0, sizeof(struct btcoexist_priv)); + if (AutoloadFail) { + pbtpriv->BT_Coexist = _FALSE; + pbtpriv->BT_CoexistType = BT_2Wire; + pbtpriv->BT_Ant_Num = Ant_x2; + pbtpriv->BT_Ant_isolation = 0; + pbtpriv->BT_RadioSharedType = BT_Radio_Shared; + return; + } + if (isNormal) { + if (pHalData->BoardType == BOARD_USB_COMBO) + pbtpriv->BT_Coexist = _TRUE; + else + pbtpriv->BT_Coexist = ((PROMContent[EEPROM_RF_OPT3] & + 0x20) >> 5); /* bit[5] */ + rf_opt4 = PROMContent[EEPROM_RF_OPT4]; + pbtpriv->BT_CoexistType = ((rf_opt4&0xe)>>1); /* bit [3:1] */ + pbtpriv->BT_Ant_Num = (rf_opt4&0x1); /* bit [0] */ + pbtpriv->BT_Ant_isolation = ((rf_opt4&0x10)>>4); /* bit [4] */ + pbtpriv->BT_RadioSharedType = ((rf_opt4&0x20)>>5); /* bit [5] */ + } else { + pbtpriv->BT_Coexist = (PROMContent[EEPROM_RF_OPT4] >> 4) ? + _TRUE : _FALSE; + } + _update_bt_param(Adapter); +} +#endif + +static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u16 i, usvalue; + u8 hwinfo[HWSET_MAX_SIZE] = {0}; + u16 eeprom_id; + + if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { + rtl_efuse_shadow_map_update(hw); + memcpy((void *)hwinfo, + (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + HWSET_MAX_SIZE); + } else if (rtlefuse->epromtype == EEPROM_93C46) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("RTL819X Not boot from eeprom, check it !!")); + } + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"), + hwinfo, HWSET_MAX_SIZE); + eeprom_id = *((u16 *)&hwinfo[0]); + if (eeprom_id != RTL8190_EEPROM_ID) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("EEPROM ID(%#x) is invalid!!\n", eeprom_id)); + rtlefuse->autoload_failflag = true; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n")); + rtlefuse->autoload_failflag = false; + } + if (rtlefuse->autoload_failflag == true) + return; + for (i = 0; i < 6; i += 2) { + usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; + *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; + } + printk(KERN_INFO "rtl8192cu: MAC address: %pM\n", rtlefuse->dev_addr); + _rtl92cu_read_txpower_info_from_hwpg(hw, + rtlefuse->autoload_failflag, hwinfo); + rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; + rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + (" VID = 0x%02x PID = 0x%02x\n", + rtlefuse->eeprom_vid, rtlefuse->eeprom_did)); + rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; + rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; + rtlefuse->txpwr_fromeprom = true; + rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid)); + if (rtlhal->oem_id == RT_CID_DEFAULT) { + switch (rtlefuse->eeprom_oemid) { + case EEPROM_CID_DEFAULT: + if (rtlefuse->eeprom_did == 0x8176) { + if ((rtlefuse->eeprom_svid == 0x103C && + rtlefuse->eeprom_smid == 0x1629)) + rtlhal->oem_id = RT_CID_819x_HP; + else + rtlhal->oem_id = RT_CID_DEFAULT; + } else { + rtlhal->oem_id = RT_CID_DEFAULT; + } + break; + case EEPROM_CID_TOSHIBA: + rtlhal->oem_id = RT_CID_TOSHIBA; + break; + case EEPROM_CID_QMI: + rtlhal->oem_id = RT_CID_819x_QMI; + break; + case EEPROM_CID_WHQL: + default: + rtlhal->oem_id = RT_CID_DEFAULT; + break; + } + } + _rtl92cu_read_board_type(hw, hwinfo); 
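+	/*
+	 * Note on _rtl92cu_read_txpower_info_from_hwpg() above: the per-channel,
+	 * per-path HT20 power diffs are packed as 4-bit nibbles and are effectively
+	 * signed, which is why a nibble with BIT(3) set is widened with "|= 0xF0".
+	 * A minimal decode sketch for one nibble (illustrative names only):
+	 *
+	 *	diff = tempval & 0xF;
+	 *	if (diff & BIT(3))
+	 *		diff |= 0xF0;	sign-extend the 4-bit value to 8 bits
+	 */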
+#ifdef CONFIG_BT_COEXIST + _rtl92cu_read_bluetooth_coexistInfo(hw, hwinfo, + rtlefuse->autoload_failflag); +#endif +} + +static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + switch (rtlhal->oem_id) { + case RT_CID_819x_HP: + usb_priv->ledctl.led_opendrain = true; + break; + case RT_CID_819x_Lenovo: + case RT_CID_DEFAULT: + case RT_CID_TOSHIBA: + case RT_CID_CCX: + case RT_CID_819x_Acer: + case RT_CID_WHQL: + default: + break; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("RT Customized ID: 0x%02X\n", rtlhal->oem_id)); +} + +void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw) +{ + + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 tmp_u1b; + + if (!IS_NORMAL_CHIP(rtlhal->version)) + return; + tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR); + rtlefuse->epromtype = (tmp_u1b & EEPROMSEL) ? + EEPROM_93C46 : EEPROM_BOOT_EFUSE; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from %s\n", + (tmp_u1b & EEPROMSEL) ? "EERROM" : "EFUSE")); + rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload %s\n", + (tmp_u1b & EEPROM_EN) ? "OK!!" : "ERR!!")); + _rtl92cu_read_adapter_info(hw); + _rtl92cu_hal_customized_behavior(hw); + return; +} + +static int _rtl92cu_init_power_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int status = 0; + u16 value16; + u8 value8; + /* polling autoload done. */ + u32 pollingCount = 0; + + do { + if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Autoload Done!\n")); + break; + } + if (pollingCount++ > 100) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, + ("Failed to polling REG_APS_FSMCO[PFM_ALDN]" + " done!\n")); + return -ENODEV; + } + } while (true); + /* 0. RSV_CTRL 0x1C[7:0] = 0 unlock ISO/CLK/Power control register */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0); + /* Power on when re-enter from IPS/Radio off/card disable */ + /* enable SPS into PWM mode */ + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); + udelay(100); + value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL); + if (0 == (value8 & LDV12_EN)) { + value8 |= LDV12_EN; + rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + (" power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x.\n", + value8)); + udelay(100); + value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL); + value8 &= ~ISO_MD2PP; + rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, value8); + } + /* auto enable WLAN */ + pollingCount = 0; + value16 = rtl_read_word(rtlpriv, REG_APS_FSMCO); + value16 |= APFM_ONMAC; + rtl_write_word(rtlpriv, REG_APS_FSMCO, value16); + do { + if (!(rtl_read_word(rtlpriv, REG_APS_FSMCO) & APFM_ONMAC)) { + printk(KERN_INFO "rtl8192cu: MAC auto ON okay!\n"); + break; + } + if (pollingCount++ > 100) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, + ("Failed to polling REG_APS_FSMCO[APFM_ONMAC]" + " done!\n")); + return -ENODEV; + } + } while (true); + /* Enable Radio ,GPIO ,and LED function */ + rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x0812); + /* release RF digital isolation */ + value16 = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL); + value16 &= ~ISO_DIOR; + rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, value16); + /* Reconsider when to do this operation after asking HWSD. 
*/ + pollingCount = 0; + rtl_write_byte(rtlpriv, REG_APSD_CTRL, (rtl_read_byte(rtlpriv, + REG_APSD_CTRL) & ~BIT(6))); + do { + pollingCount++; + } while ((pollingCount < 200) && + (rtl_read_byte(rtlpriv, REG_APSD_CTRL) & BIT(7))); + /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */ + value16 = rtl_read_word(rtlpriv, REG_CR); + value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN | + PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC); + rtl_write_word(rtlpriv, REG_CR, value16); + return status; +} + +static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw, + bool wmm_enable, + u8 out_ep_num, + u8 queue_sel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool isChipN = IS_NORMAL_CHIP(rtlhal->version); + u32 outEPNum = (u32)out_ep_num; + u32 numHQ = 0; + u32 numLQ = 0; + u32 numNQ = 0; + u32 numPubQ; + u32 value32; + u8 value8; + u32 txQPageNum, txQPageUnit, txQRemainPage; + + if (!wmm_enable) { + numPubQ = (isChipN) ? CHIP_B_PAGE_NUM_PUBQ : + CHIP_A_PAGE_NUM_PUBQ; + txQPageNum = TX_TOTAL_PAGE_NUMBER - numPubQ; + + txQPageUnit = txQPageNum/outEPNum; + txQRemainPage = txQPageNum % outEPNum; + if (queue_sel & TX_SELE_HQ) + numHQ = txQPageUnit; + if (queue_sel & TX_SELE_LQ) + numLQ = txQPageUnit; + /* HIGH priority queue always present in the configuration of + * 2 out-ep. Remainder pages have assigned to High queue */ + if ((outEPNum > 1) && (txQRemainPage)) + numHQ += txQRemainPage; + /* NOTE: This step done before writting REG_RQPN. */ + if (isChipN) { + if (queue_sel & TX_SELE_NQ) + numNQ = txQPageUnit; + value8 = (u8)_NPQ(numNQ); + rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8); + } + } else { + /* for WMM ,number of out-ep must more than or equal to 2! */ + numPubQ = isChipN ? WMM_CHIP_B_PAGE_NUM_PUBQ : + WMM_CHIP_A_PAGE_NUM_PUBQ; + if (queue_sel & TX_SELE_HQ) { + numHQ = isChipN ? WMM_CHIP_B_PAGE_NUM_HPQ : + WMM_CHIP_A_PAGE_NUM_HPQ; + } + if (queue_sel & TX_SELE_LQ) { + numLQ = isChipN ? WMM_CHIP_B_PAGE_NUM_LPQ : + WMM_CHIP_A_PAGE_NUM_LPQ; + } + /* NOTE: This step done before writting REG_RQPN. */ + if (isChipN) { + if (queue_sel & TX_SELE_NQ) + numNQ = WMM_CHIP_B_PAGE_NUM_NPQ; + value8 = (u8)_NPQ(numNQ); + rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8); + } + } + /* TX DMA */ + value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN; + rtl_write_dword(rtlpriv, REG_RQPN, value32); +} + +static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 txpktbuf_bndy; + u8 value8; + + if (!wmm_enable) + txpktbuf_bndy = TX_PAGE_BOUNDARY; + else /* for WMM */ + txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version)) + ? 
WMM_CHIP_B_TX_PAGE_BOUNDARY + : WMM_CHIP_A_TX_PAGE_BOUNDARY; + rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_TDECTRL+1, txpktbuf_bndy); + rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF); + value8 = _PSRX(RX_PAGE_SIZE_REG_VALUE) | _PSTX(PBP_128); + rtl_write_byte(rtlpriv, REG_PBP, value8); +} + +static void _rtl92c_init_chipN_reg_priority(struct ieee80211_hw *hw, u16 beQ, + u16 bkQ, u16 viQ, u16 voQ, + u16 mgtQ, u16 hiQ) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 value16 = (rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7); + + value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) | + _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) | + _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ); + rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16); +} + +static void _rtl92cu_init_chipN_one_out_ep_priority(struct ieee80211_hw *hw, + bool wmm_enable, + u8 queue_sel) +{ + u16 uninitialized_var(value); + + switch (queue_sel) { + case TX_SELE_HQ: + value = QUEUE_HIGH; + break; + case TX_SELE_LQ: + value = QUEUE_LOW; + break; + case TX_SELE_NQ: + value = QUEUE_NORMAL; + break; + default: + WARN_ON(1); /* Shall not reach here! */ + break; + } + _rtl92c_init_chipN_reg_priority(hw, value, value, value, value, + value, value); + printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel); +} + +static void _rtl92cu_init_chipN_two_out_ep_priority(struct ieee80211_hw *hw, + bool wmm_enable, + u8 queue_sel) +{ + u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ; + u16 uninitialized_var(valueHi); + u16 uninitialized_var(valueLow); + + switch (queue_sel) { + case (TX_SELE_HQ | TX_SELE_LQ): + valueHi = QUEUE_HIGH; + valueLow = QUEUE_LOW; + break; + case (TX_SELE_NQ | TX_SELE_LQ): + valueHi = QUEUE_NORMAL; + valueLow = QUEUE_LOW; + break; + case (TX_SELE_HQ | TX_SELE_NQ): + valueHi = QUEUE_HIGH; + valueLow = QUEUE_NORMAL; + break; + default: + WARN_ON(1); + break; + } + if (!wmm_enable) { + beQ = valueLow; + bkQ = valueLow; + viQ = valueHi; + voQ = valueHi; + mgtQ = valueHi; + hiQ = valueHi; + } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */ + beQ = valueHi; + bkQ = valueLow; + viQ = valueLow; + voQ = valueHi; + mgtQ = valueHi; + hiQ = valueHi; + } + _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ); + printk(KERN_INFO "rtl8192cu: Tx queue select: 0x%02x\n", queue_sel); +} + +static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw, + bool wmm_enable, + u8 queue_sel) +{ + u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (!wmm_enable) { /* typical setting */ + beQ = QUEUE_LOW; + bkQ = QUEUE_LOW; + viQ = QUEUE_NORMAL; + voQ = QUEUE_HIGH; + mgtQ = QUEUE_HIGH; + hiQ = QUEUE_HIGH; + } else { /* for WMM */ + beQ = QUEUE_LOW; + bkQ = QUEUE_NORMAL; + viQ = QUEUE_NORMAL; + voQ = QUEUE_HIGH; + mgtQ = QUEUE_HIGH; + hiQ = QUEUE_HIGH; + } + _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ); + RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, + ("Tx queue select :0x%02x..\n", queue_sel)); +} + +static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw, + bool wmm_enable, + u8 out_ep_num, + u8 queue_sel) +{ + switch (out_ep_num) { + case 1: + _rtl92cu_init_chipN_one_out_ep_priority(hw, wmm_enable, + queue_sel); + break; + case 2: + _rtl92cu_init_chipN_two_out_ep_priority(hw, wmm_enable, + queue_sel); + break; + 
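+	/* With three bulk-out endpoints the high, normal and low hardware queues
+	 * are all available; see _rtl92cu_init_chipN_three_out_ep_priority() above. */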
case 3: + _rtl92cu_init_chipN_three_out_ep_priority(hw, wmm_enable, + queue_sel); + break; + default: + WARN_ON(1); /* Shall not reach here! */ + break; + } +} + +static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw, + bool wmm_enable, + u8 out_ep_num, + u8 queue_sel) +{ + u8 hq_sele; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (out_ep_num) { + case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */ + if (!wmm_enable) /* typical setting */ + hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ | + HQSEL_HIQ; + else /* for WMM */ + hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ | + HQSEL_HIQ; + break; + case 1: + if (TX_SELE_LQ == queue_sel) { + /* map all endpoint to Low queue */ + hq_sele = 0; + } else if (TX_SELE_HQ == queue_sel) { + /* map all endpoint to High queue */ + hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_BEQ | + HQSEL_BKQ | HQSEL_MGTQ | HQSEL_HIQ; + } + break; + default: + WARN_ON(1); /* Shall not reach here! */ + break; + } + rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele); + RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, + ("Tx queue select :0x%02x..\n", hq_sele)); +} + +static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw, + bool wmm_enable, + u8 out_ep_num, + u8 queue_sel) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + if (IS_NORMAL_CHIP(rtlhal->version)) + _rtl92cu_init_chipN_queue_priority(hw, wmm_enable, out_ep_num, + queue_sel); + else + _rtl92cu_init_chipT_queue_priority(hw, wmm_enable, out_ep_num, + queue_sel); +} + +static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw) +{ +} + +static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw) +{ + u16 value16; + + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APP_FCS | + RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL | + RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32); + rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf); + /* Accept all multicast address */ + rtl_write_dword(rtlpriv, REG_MAR, 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_MAR + 4, 0xFFFFFFFF); + /* Accept all management frames */ + value16 = 0xFFFF; + rtl92c_set_mgt_filter(hw, value16); + /* Reject all control frame - default value is 0 */ + rtl92c_set_ctrl_filter(hw, 0x0); + /* Accept all data frames */ + value16 = 0xFFFF; + rtl92c_set_data_filter(hw, value16); +} + +static int _rtl92cu_init_mac(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); + int err = 0; + u32 boundary = 0; + u8 wmm_enable = false; /* TODO */ + u8 out_ep_nums = rtlusb->out_ep_nums; + u8 queue_sel = rtlusb->out_queue_sel; + err = _rtl92cu_init_power_on(hw); + + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Failed to init power on!\n")); + return err; + } + if (!wmm_enable) { + boundary = TX_PAGE_BOUNDARY; + } else { /* for WMM */ + boundary = (IS_NORMAL_CHIP(rtlhal->version)) + ? WMM_CHIP_B_TX_PAGE_BOUNDARY + : WMM_CHIP_A_TX_PAGE_BOUNDARY; + } + if (false == rtl92c_init_llt_table(hw, boundary)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Failed to init LLT Table!\n")); + return -EINVAL; + } + _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums, + queue_sel); + _rtl92c_init_trx_buffer(hw, wmm_enable); + _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums, + queue_sel); + /* Get Rx PHY status in order to report RSSI and others. 
*/ + rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE); + rtl92c_init_interrupt(hw); + rtl92c_init_network_type(hw); + _rtl92cu_init_wmac_setting(hw); + rtl92c_init_adaptive_ctrl(hw); + rtl92c_init_edca(hw); + rtl92c_init_rate_fallback(hw); + rtl92c_init_retry_function(hw); + _rtl92cu_init_usb_aggregation(hw); + rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20); + rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version)); + rtl92c_init_beacon_parameters(hw, rtlhal->version); + rtl92c_init_ampdu_aggregation(hw); + rtl92c_init_beacon_max_error(hw, true); + return err; +} + +void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 sec_reg_value = 0x0; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n", + rtlpriv->sec.pairwise_enc_algorithm, + rtlpriv->sec.group_enc_algorithm)); + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + ("not open sw encryption\n")); + return; + } + sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable; + if (rtlpriv->sec.use_defaultkey) { + sec_reg_value |= SCR_TxUseDK; + sec_reg_value |= SCR_RxUseDK; + } + if (IS_NORMAL_CHIP(rtlhal->version)) + sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK); + rtl_write_byte(rtlpriv, REG_CR + 1, 0x02); + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + ("The SECR-value %x\n", sec_reg_value)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); +} + +static void _rtl92cu_hw_configure(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + /* To Fix MAC loopback mode fail. */ + rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f); + rtl_write_byte(rtlpriv, 0x15, 0xe9); + /* HW SEQ CTRL */ + /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. 
*/ + rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF); + /* fixed USB interface interference issue */ + rtl_write_byte(rtlpriv, 0xfe40, 0xe0); + rtl_write_byte(rtlpriv, 0xfe41, 0x8d); + rtl_write_byte(rtlpriv, 0xfe42, 0x80); + rtlusb->reg_bcn_ctrl_val = 0x18; + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val); +} + +static void _InitPABias(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 pa_setting; + + /* FIXED PA current issue */ + pa_setting = efuse_read_1byte(hw, 0x1FA); + if (!(pa_setting & BIT(0))) { + rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x0F406); + rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x4F406); + rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0x8F406); + rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0FFFFF, 0xCF406); + } + if (!(pa_setting & BIT(1)) && IS_NORMAL_CHIP(rtlhal->version) && + IS_92C_SERIAL(rtlhal->version)) { + rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x0F406); + rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x4F406); + rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0x8F406); + rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0FFFFF, 0xCF406); + } + if (!(pa_setting & BIT(4))) { + pa_setting = rtl_read_byte(rtlpriv, 0x16); + pa_setting &= 0x0F; + rtl_write_byte(rtlpriv, 0x16, pa_setting | 0x90); + } +} + +static void _InitAntenna_Selection(struct ieee80211_hw *hw) +{ +#ifdef CONFIG_ANTENNA_DIVERSITY + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (pHalData->AntDivCfg == 0) + return; + + if (rtlphy->rf_type == RF_1T1R) { + rtl_write_dword(rtlpriv, REG_LEDCFG0, + rtl_read_dword(rtlpriv, + REG_LEDCFG0)|BIT(23)); + rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01); + if (rtl_get_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300) == + Antenna_A) + pHalData->CurAntenna = Antenna_A; + else + pHalData->CurAntenna = Antenna_B; + } +#endif +} + +static void _dump_registers(struct ieee80211_hw *hw) +{ +} + +static void _update_mac_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR); + mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0); + mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1); + mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2); +} + +int rtl92cu_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + int err = 0; + static bool iqk_initialized; + + rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU; + err = _rtl92cu_init_mac(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("init mac failed!\n")); + return err; + } + err = rtl92c_download_fw(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + ("Failed to download FW. 
Init HW without FW now..\n")); + err = 1; + rtlhal->fw_ready = false; + return err; + } else { + rtlhal->fw_ready = true; + } + rtlhal->last_hmeboxnum = 0; /* h2c */ + _rtl92cu_phy_param_tab_init(hw); + rtl92cu_phy_mac_config(hw); + rtl92cu_phy_bb_config(hw); + rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; + rtl92c_phy_rf_config(hw); + if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && + !IS_92C_SERIAL(rtlhal->version)) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G1, MASKDWORD, 0x30255); + rtl_set_rfreg(hw, RF90_PATH_A, RF_RX_G2, MASKDWORD, 0x50a00); + } + rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0, + RF_CHNLBW, RFREG_OFFSET_MASK); + rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1, + RF_CHNLBW, RFREG_OFFSET_MASK); + rtl92cu_bb_block_on(hw); + rtl_cam_reset_all_entry(hw); + rtl92cu_enable_hw_security_config(hw); + ppsc->rfpwr_state = ERFON; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr); + if (ppsc->rfpwr_state == ERFON) { + rtl92c_phy_set_rfpath_switch(hw, 1); + if (iqk_initialized) { + rtl92c_phy_iq_calibrate(hw, false); + } else { + rtl92c_phy_iq_calibrate(hw, false); + iqk_initialized = true; + } + rtl92c_dm_check_txpower_tracking(hw); + rtl92c_phy_lc_calibrate(hw); + } + _rtl92cu_hw_configure(hw); + _InitPABias(hw); + _InitAntenna_Selection(hw); + _update_mac_setting(hw); + rtl92c_dm_init(hw); + _dump_registers(hw); + return err; +} + +static void _DisableRFAFEAndResetBB(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); +/************************************** +a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue +b. RF path 0 offset 0x00 = 0x00 disable RF +c. APSD_CTRL 0x600[7:0] = 0x40 +d. SYS_FUNC_EN 0x02[7:0] = 0x16 reset BB state machine +e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine +***************************************/ + u8 eRFPath = 0, value8 = 0; + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + rtl_set_rfreg(hw, (enum radio_path)eRFPath, 0x0, MASKBYTE0, 0x0); + + value8 |= APSDOFF; + rtl_write_byte(rtlpriv, REG_APSD_CTRL, value8); /*0x40*/ + value8 = 0; + value8 |= (FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTn); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8);/*0x16*/ + value8 &= (~FEN_BB_GLB_RSTn); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value8); /*0x14*/ +} + +static void _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + + if (rtlhal->fw_version <= 0x20) { + /***************************** + f. MCUFWDL 0x80[7:0]=0 reset MCU ready status + g. SYS_FUNC_EN 0x02[10]= 0 reset MCU reg, (8051 reset) + h. SYS_FUNC_EN 0x02[15-12]= 5 reset MAC reg, DCORE + i. 
SYS_FUNC_EN 0x02[10]= 1 enable MCU reg, (8051 enable) + ******************************/ + u16 valu16 = 0; + + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0); + valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); + rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 & + (~FEN_CPUEN))); /* reset MCU ,8051 */ + valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN)&0x0FFF; + rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 | + (FEN_HWPDN|FEN_ELDR))); /* reset MAC */ + valu16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); + rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, (valu16 | + FEN_CPUEN)); /* enable MCU ,8051 */ + } else { + u8 retry_cnts = 0; + + /* IF fw in RAM code, do reset */ + if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(1)) { + /* reset MCU ready status */ + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0); + if (rtlhal->fw_ready) { + /* 8051 reset by self */ + rtl_write_byte(rtlpriv, REG_HMETFR+3, 0x20); + while ((retry_cnts++ < 100) && + (FEN_CPUEN & rtl_read_word(rtlpriv, + REG_SYS_FUNC_EN))) { + udelay(50); + } + if (retry_cnts >= 100) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("#####=> 8051 reset failed!.." + ".......................\n");); + /* if 8051 reset fail, reset MAC. */ + rtl_write_byte(rtlpriv, + REG_SYS_FUNC_EN + 1, + 0x50); + udelay(100); + } + } + } + /* Reset MAC and Enable 8051 */ + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54); + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0); + } + if (bWithoutHWSM) { + /***************************** + Without HW auto state machine + g.SYS_CLKR 0x08[15:0] = 0x30A3 disable MAC clock + h.AFE_PLL_CTRL 0x28[7:0] = 0x80 disable AFE PLL + i.AFE_XTAL_CTRL 0x24[15:0] = 0x880F gated AFE DIG_CLOCK + j.SYS_ISu_CTRL 0x00[7:0] = 0xF9 isolated digital to PON + ******************************/ + rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3); + rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80); + rtl_write_word(rtlpriv, REG_AFE_XTAL_CTRL, 0x880F); + rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, 0xF9); + } +} + +static void _ResetDigitalProcedure2(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); +/***************************** +k. SYS_FUNC_EN 0x03[7:0] = 0x44 disable ELDR runction +l. SYS_CLKR 0x08[15:0] = 0x3083 disable ELDR clock +m. SYS_ISO_CTRL 0x01[7:0] = 0x83 isolated ELDR to PON +******************************/ + rtl_write_word(rtlpriv, REG_SYS_CLKR, 0x70A3); + rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL+1, 0x82); +} + +static void _DisableGPIO(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); +/*************************************** +j. GPIO_PIN_CTRL 0x44[31:0]=0x000 +k. Value = GPIO_PIN_CTRL[7:0] +l. GPIO_PIN_CTRL 0x44[31:0] = 0x00FF0000 | (value <<8); write ext PIN level +m. GPIO_MUXCFG 0x42 [15:0] = 0x0780 +n. LEDCFG 0x4C[15:0] = 0x8080 +***************************************/ + u8 value8; + u16 value16; + u32 value32; + + /* 1. Disable GPIO[7:0] */ + rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, 0x0000); + value32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL) & 0xFFFF00FF; + value8 = (u8) (value32&0x000000FF); + value32 |= ((value8<<8) | 0x00FF0000); + rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, value32); + /* 2. Disable GPIO[10:8] */ + rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG+3, 0x00); + value16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG+2) & 0xFF0F; + value8 = (u8) (value16&0x000F); + value16 |= ((value8<<4) | 0x0780); + rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL+2, value16); + /* 3. 
Disable LED0 & 1 */ + rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080); +} + +static void _DisableAnalog(struct ieee80211_hw *hw, bool bWithoutHWSM) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 value16 = 0; + u8 value8 = 0; + + if (bWithoutHWSM) { + /***************************** + n. LDOA15_CTRL 0x20[7:0] = 0x04 disable A15 power + o. LDOV12D_CTRL 0x21[7:0] = 0x54 disable digital core power + r. When driver call disable, the ASIC will turn off remaining + clock automatically + ******************************/ + rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04); + value8 = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL); + value8 &= (~LDV12_EN); + rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8); + } + +/***************************** +h. SPS0_CTRL 0x11[7:0] = 0x23 enter PFM mode +i. APS_FSMCO 0x04[15:0] = 0x4802 set USB suspend +******************************/ + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23); + value16 |= (APDM_HOST | AFSM_HSUS | PFM_ALDN); + rtl_write_word(rtlpriv, REG_APS_FSMCO, (u16)value16); + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0E); +} + +static void _CardDisableHWSM(struct ieee80211_hw *hw) +{ + /* ==== RF Off Sequence ==== */ + _DisableRFAFEAndResetBB(hw); + /* ==== Reset digital sequence ====== */ + _ResetDigitalProcedure1(hw, false); + /* ==== Pull GPIO PIN to balance level and LED control ====== */ + _DisableGPIO(hw); + /* ==== Disable analog sequence === */ + _DisableAnalog(hw, false); +} + +static void _CardDisableWithoutHWSM(struct ieee80211_hw *hw) +{ + /*==== RF Off Sequence ==== */ + _DisableRFAFEAndResetBB(hw); + /* ==== Reset digital sequence ====== */ + _ResetDigitalProcedure1(hw, true); + /* ==== Pull GPIO PIN to balance level and LED control ====== */ + _DisableGPIO(hw); + /* ==== Reset digital sequence ====== */ + _ResetDigitalProcedure2(hw); + /* ==== Disable analog sequence === */ + _DisableAnalog(hw, true); +} + +static void _rtl92cu_set_bcn_ctrl_reg(struct ieee80211_hw *hw, + u8 set_bits, u8 clear_bits) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + rtlusb->reg_bcn_ctrl_val |= set_bits; + rtlusb->reg_bcn_ctrl_val &= ~clear_bits; + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlusb->reg_bcn_ctrl_val); +} + +static void _rtl92cu_stop_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 tmp1byte = 0; + if (IS_NORMAL_CHIP(rtlhal->version)) { + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp1byte & (~BIT(6))); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte &= ~(BIT(0)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); + } else { + rtl_write_byte(rtlpriv, REG_TXPAUSE, + rtl_read_byte(rtlpriv, REG_TXPAUSE) | BIT(6)); + } +} + +static void _rtl92cu_resume_tx_beacon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u8 tmp1byte = 0; + + if (IS_NORMAL_CHIP(rtlhal->version)) { + tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp1byte | BIT(6)); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff); + tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2); + tmp1byte |= BIT(0); + rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); + } else { + rtl_write_byte(rtlpriv, REG_TXPAUSE, + rtl_read_byte(rtlpriv, REG_TXPAUSE) & (~BIT(6))); + } +} + 
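+/*
+ * _rtl92cu_stop_tx_beacon()/_rtl92cu_resume_tx_beacon() above gate beaconing
+ * differently per silicon: normal chips toggle BIT(6) of REG_FWHW_TXQ_CTRL + 2
+ * and the TBTT prohibit window, while test chips only pause via BIT(6) of
+ * REG_TXPAUSE.  The beacon sub-function helpers below instead go through
+ * _rtl92cu_set_bcn_ctrl_reg(), which applies set/clear masks to the cached
+ * rtlusb->reg_bcn_ctrl_val and issues a single write to REG_BCN_CTRL, so the
+ * register is never read back over the USB bus.
+ */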
+static void _rtl92cu_enable_bcn_sub_func(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (IS_NORMAL_CHIP(rtlhal->version)) + _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(1)); + else + _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); +} + +static void _rtl92cu_disable_bcn_sub_func(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + if (IS_NORMAL_CHIP(rtlhal->version)) + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(1), 0); + else + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); +} + +static int _rtl92cu_set_media_status(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 bt_msr = rtl_read_byte(rtlpriv, MSR); + enum led_ctl_mode ledaction = LED_CTL_NO_LINK; + + bt_msr &= 0xfc; + rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF); + if (type == NL80211_IFTYPE_UNSPECIFIED || type == + NL80211_IFTYPE_STATION) { + _rtl92cu_stop_tx_beacon(hw); + _rtl92cu_enable_bcn_sub_func(hw); + } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) { + _rtl92cu_resume_tx_beacon(hw); + _rtl92cu_disable_bcn_sub_func(hw); + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("Set HW_VAR_MEDIA_" + "STATUS:No such media status(%x).\n", type)); + } + switch (type) { + case NL80211_IFTYPE_UNSPECIFIED: + bt_msr |= MSR_NOLINK; + ledaction = LED_CTL_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Set Network type to NO LINK!\n")); + break; + case NL80211_IFTYPE_ADHOC: + bt_msr |= MSR_ADHOC; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Set Network type to Ad Hoc!\n")); + break; + case NL80211_IFTYPE_STATION: + bt_msr |= MSR_INFRA; + ledaction = LED_CTL_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Set Network type to STA!\n")); + break; + case NL80211_IFTYPE_AP: + bt_msr |= MSR_AP; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Set Network type to AP!\n")); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Network type %d not support!\n", type)); + goto error_out; + } + rtl_write_byte(rtlpriv, (MSR), bt_msr); + rtlpriv->cfg->ops->led_control(hw, ledaction); + if ((bt_msr & 0xfc) == MSR_AP) + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); + else + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); + return 0; +error_out: + return 1; +} + +void rtl92cu_card_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + enum nl80211_iftype opmode; + + mac->link_state = MAC80211_NOLINK; + opmode = NL80211_IFTYPE_UNSPECIFIED; + _rtl92cu_set_media_status(hw, opmode); + rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + if (rtlusb->disableHWSM) + _CardDisableHWSM(hw); + else + _CardDisableWithoutHWSM(hw); +} + +void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) +{ + /* dummy routine needed for callback from rtl_op_configure_filter() */ +} + +/*========================================================================== */ + +static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 filterout_non_associated_bssid = false; + + switch (type) { + case NL80211_IFTYPE_ADHOC: + 
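+	/* Both ad hoc and station mode filter out frames from non-associated
+	 * BSSIDs; the other interface types have BSSID filtering disabled below. */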
case NL80211_IFTYPE_STATION: + filterout_non_associated_bssid = true; + break; + case NL80211_IFTYPE_UNSPECIFIED: + case NL80211_IFTYPE_AP: + default: + break; + } + if (filterout_non_associated_bssid == true) { + if (IS_NORMAL_CHIP(rtlhal->version)) { + switch (rtlphy->current_io_type) { + case IO_CMD_RESUME_DM_BY_SCAN: + reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_RCR, (u8 *)(®_rcr)); + /* enable update TSF */ + _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); + break; + case IO_CMD_PAUSE_DM_BY_SCAN: + reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_RCR, (u8 *)(®_rcr)); + /* disable update TSF */ + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); + break; + } + } else { + reg_rcr |= (RCR_CBSSID); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(®_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5))); + } + } else if (filterout_non_associated_bssid == false) { + if (IS_NORMAL_CHIP(rtlhal->version)) { + reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(®_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); + } else { + reg_rcr &= (~RCR_CBSSID); + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, + (u8 *)(®_rcr)); + _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0); + } + } +} + +int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) +{ + if (_rtl92cu_set_media_status(hw, type)) + return -EOPNOTSUPP; + _rtl92cu_set_check_bssid(hw, type); + return 0; +} + +static void _InitBeaconParameters(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010); + + /* TODO: Remove these magic number */ + rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404); + rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME); + rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME); + /* Change beacon AIFS to the largest number + * beacause test chip does not contension before sending beacon. */ + if (IS_NORMAL_CHIP(rtlhal->version)) + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F); + else + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF); +} + +static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable, + bool Linked) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4) | BIT(3) | BIT(1)), 0x00); + rtl_write_byte(rtlpriv, REG_RD_CTRL+1, 0x6F); +} + +void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw) +{ + + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 bcn_interval, atim_window; + u32 value32; + + bcn_interval = mac->beacon_interval; + atim_window = 2; /*FIX MERGE */ + rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + _InitBeaconParameters(hw); + rtl_write_byte(rtlpriv, REG_SLOT, 0x09); + /* + * Force beacon frame transmission even after receiving beacon frame + * from other ad hoc STA + * + * + * Reset TSF Timer to zero, added by Roger. 
2008.06.24 + */ + value32 = rtl_read_dword(rtlpriv, REG_TCR); + value32 &= ~TSFRST; + rtl_write_dword(rtlpriv, REG_TCR, value32); + value32 |= TSFRST; + rtl_write_dword(rtlpriv, REG_TCR, value32); + RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD, + ("SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n", + value32)); + /* TODO: Modify later (Find the right parameters) + * NOTE: Fix test chip's bug (about contention windows's randomness) */ + if ((mac->opmode == NL80211_IFTYPE_ADHOC) || + (mac->opmode == NL80211_IFTYPE_AP)) { + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50); + } + _beacon_function_enable(hw, true, true); +} + +void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 bcn_interval = mac->beacon_interval; + + RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, + ("beacon_interval:%d\n", bcn_interval)); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); +} + +void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr) +{ +} + +void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + switch (variable) { + case HW_VAR_RCR: + *((u32 *)(val)) = mac->rx_conf; + break; + case HW_VAR_RF_STATE: + *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state; + break; + case HW_VAR_FWLPS_RF_ON:{ + enum rf_pwrstate rfState; + u32 val_rcr; + + rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE, + (u8 *)(&rfState)); + if (rfState == ERFOFF) { + *((bool *) (val)) = true; + } else { + val_rcr = rtl_read_dword(rtlpriv, REG_RCR); + val_rcr &= 0x00070000; + if (val_rcr) + *((bool *) (val)) = false; + else + *((bool *) (val)) = true; + } + break; + } + case HW_VAR_FW_PSMODE_STATUS: + *((bool *) (val)) = ppsc->fw_current_inpsmode; + break; + case HW_VAR_CORRECT_TSF:{ + u64 tsf; + u32 *ptsf_low = (u32 *)&tsf; + u32 *ptsf_high = ((u32 *)&tsf) + 1; + + *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4)); + *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR); + *((u64 *)(val)) = tsf; + break; + } + case HW_VAR_MGT_FILTER: + *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0); + break; + case HW_VAR_CTRL_FILTER: + *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1); + break; + case HW_VAR_DATA_FILTER: + *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + } +} + +void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + enum wireless_mode wirelessmode = mac->mode; + u8 idx = 0; + + switch (variable) { + case HW_VAR_ETHER_ADDR:{ + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_MACID + idx), + val[idx]); + } + break; + } + case HW_VAR_BASIC_RATE:{ + u16 rate_cfg = ((u16 *) val)[0]; + u8 rate_index = 0; + + rate_cfg &= 0x15f; + /* TODO */ + /* if (mac->current_network.vender == HT_IOT_PEER_CISCO + * && ((rate_cfg & 0x150) == 0)) { + * rate_cfg |= 0x010; + * } */ + rate_cfg |= 0x01; + rtl_write_byte(rtlpriv, REG_RRSR, 
rate_cfg & 0xff); + rtl_write_byte(rtlpriv, REG_RRSR + 1, + (rate_cfg >> 8) & 0xff); + while (rate_cfg > 0x1) { + rate_cfg >>= 1; + rate_index++; + } + rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, + rate_index); + break; + } + case HW_VAR_BSSID:{ + for (idx = 0; idx < ETH_ALEN; idx++) { + rtl_write_byte(rtlpriv, (REG_BSSID + idx), + val[idx]); + } + break; + } + case HW_VAR_SIFS:{ + rtl_write_byte(rtlpriv, REG_SIFS_CCK + 1, val[0]); + rtl_write_byte(rtlpriv, REG_SIFS_OFDM + 1, val[1]); + rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]); + rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]); + rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]); + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + ("HW_VAR_SIFS\n")); + break; + } + case HW_VAR_SLOT_TIME:{ + u8 e_aci; + u8 QOS_MODE = 1; + + rtl_write_byte(rtlpriv, REG_SLOT, val[0]); + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + ("HW_VAR_SLOT_TIME %x\n", val[0])); + if (QOS_MODE) { + for (e_aci = 0; e_aci < AC_MAX; e_aci++) + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AC_PARAM, + (u8 *)(&e_aci)); + } else { + u8 sifstime = 0; + u8 u1bAIFS; + + if (IS_WIRELESS_MODE_A(wirelessmode) || + IS_WIRELESS_MODE_N_24G(wirelessmode) || + IS_WIRELESS_MODE_N_5G(wirelessmode)) + sifstime = 16; + else + sifstime = 10; + u1bAIFS = sifstime + (2 * val[0]); + rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM, + u1bAIFS); + rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM, + u1bAIFS); + rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM, + u1bAIFS); + rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM, + u1bAIFS); + } + break; + } + case HW_VAR_ACK_PREAMBLE:{ + u8 reg_tmp; + u8 short_preamble = (bool) (*(u8 *) val); + reg_tmp = 0; + if (short_preamble) + reg_tmp |= 0x80; + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp); + break; + } + case HW_VAR_AMPDU_MIN_SPACE:{ + u8 min_spacing_to_set; + u8 sec_min_space; + + min_spacing_to_set = *((u8 *) val); + if (min_spacing_to_set <= 7) { + switch (rtlpriv->sec.pairwise_enc_algorithm) { + case NO_ENCRYPTION: + case AESCCMP_ENCRYPTION: + sec_min_space = 0; + break; + case WEP40_ENCRYPTION: + case WEP104_ENCRYPTION: + case TKIP_ENCRYPTION: + sec_min_space = 6; + break; + default: + sec_min_space = 7; + break; + } + if (min_spacing_to_set < sec_min_space) + min_spacing_to_set = sec_min_space; + mac->min_space_cfg = ((mac->min_space_cfg & + 0xf8) | + min_spacing_to_set); + *val = min_spacing_to_set; + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n", + mac->min_space_cfg)); + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + } + break; + } + case HW_VAR_SHORTGI_DENSITY:{ + u8 density_to_set; + + density_to_set = *((u8 *) val); + density_to_set &= 0x1f; + mac->min_space_cfg &= 0x07; + mac->min_space_cfg |= (density_to_set << 3); + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + ("Set HW_VAR_SHORTGI_DENSITY: %#x\n", + mac->min_space_cfg)); + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, + mac->min_space_cfg); + break; + } + case HW_VAR_AMPDU_FACTOR:{ + u8 regtoset_normal[4] = {0x41, 0xa8, 0x72, 0xb9}; + u8 factor_toset; + u8 *p_regtoset = NULL; + u8 index = 0; + + p_regtoset = regtoset_normal; + factor_toset = *((u8 *) val); + if (factor_toset <= 3) { + factor_toset = (1 << (factor_toset + 2)); + if (factor_toset > 0xf) + factor_toset = 0xf; + for (index = 0; index < 4; index++) { + if ((p_regtoset[index] & 0xf0) > + (factor_toset << 4)) + p_regtoset[index] = + (p_regtoset[index] & 0x0f) + | (factor_toset << 4); + if ((p_regtoset[index] & 0x0f) > + factor_toset) + 
p_regtoset[index] = + (p_regtoset[index] & 0xf0) + | (factor_toset); + rtl_write_byte(rtlpriv, + (REG_AGGLEN_LMT + index), + p_regtoset[index]); + } + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + ("Set HW_VAR_AMPDU_FACTOR: %#x\n", + factor_toset)); + } + break; + } + case HW_VAR_AC_PARAM:{ + u8 e_aci = *((u8 *) val); + u32 u4b_ac_param; + u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min); + u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max); + u16 tx_op = le16_to_cpu(mac->ac[e_aci].tx_op); + + u4b_ac_param = (u32) mac->ac[e_aci].aifs; + u4b_ac_param |= (u32) ((cw_min & 0xF) << + AC_PARAM_ECW_MIN_OFFSET); + u4b_ac_param |= (u32) ((cw_max & 0xF) << + AC_PARAM_ECW_MAX_OFFSET); + u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET; + RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, + ("queue:%x, ac_param:%x\n", e_aci, + u4b_ac_param)); + switch (e_aci) { + case AC1_BK: + rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, + u4b_ac_param); + break; + case AC0_BE: + rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, + u4b_ac_param); + break; + case AC2_VI: + rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, + u4b_ac_param); + break; + case AC3_VO: + rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, + u4b_ac_param); + break; + default: + RT_ASSERT(false, ("SetHwReg8185(): invalid" + " aci: %d !\n", e_aci)); + break; + } + if (rtlusb->acm_method != eAcmWay2_SW) + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_ACM_CTRL, (u8 *)(&e_aci)); + break; + } + case HW_VAR_ACM_CTRL:{ + u8 e_aci = *((u8 *) val); + union aci_aifsn *p_aci_aifsn = (union aci_aifsn *) + (&(mac->ac[0].aifs)); + u8 acm = p_aci_aifsn->f.acm; + u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); + + acm_ctrl = + acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1); + if (acm) { + switch (e_aci) { + case AC0_BE: + acm_ctrl |= AcmHw_BeqEn; + break; + case AC2_VI: + acm_ctrl |= AcmHw_ViqEn; + break; + case AC3_VO: + acm_ctrl |= AcmHw_VoqEn; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + ("HW_VAR_ACM_CTRL acm set " + "failed: eACI is %d\n", acm)); + break; + } + } else { + switch (e_aci) { + case AC0_BE: + acm_ctrl &= (~AcmHw_BeqEn); + break; + case AC2_VI: + acm_ctrl &= (~AcmHw_ViqEn); + break; + case AC3_VO: + acm_ctrl &= (~AcmHw_BeqEn); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + } + } + RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE, + ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] " + "Write 0x%X\n", acm_ctrl)); + rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); + break; + } + case HW_VAR_RCR:{ + rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]); + mac->rx_conf = ((u32 *) (val))[0]; + RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG, + ("### Set RCR(0x%08x) ###\n", mac->rx_conf)); + break; + } + case HW_VAR_RETRY_LIMIT:{ + u8 retry_limit = ((u8 *) (val))[0]; + + rtl_write_word(rtlpriv, REG_RL, + retry_limit << RETRY_LIMIT_SHORT_SHIFT | + retry_limit << RETRY_LIMIT_LONG_SHIFT); + RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG, ("Set HW_VAR_R" + "ETRY_LIMIT(0x%08x)\n", retry_limit)); + break; + } + case HW_VAR_DUAL_TSF_RST: + rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1))); + break; + case HW_VAR_EFUSE_BYTES: + rtlefuse->efuse_usedbytes = *((u16 *) val); + break; + case HW_VAR_EFUSE_USAGE: + rtlefuse->efuse_usedpercentage = *((u8 *) val); + break; + case HW_VAR_IO_CMD: + rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val)); + break; + case HW_VAR_WPA_CONFIG: + rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val)); + break; + case HW_VAR_SET_RPWM:{ + u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM); + + if (rpwm_val & BIT(7)) 
+ rtl_write_byte(rtlpriv, REG_USB_HRPWM, + (*(u8 *)val)); + else + rtl_write_byte(rtlpriv, REG_USB_HRPWM, + ((*(u8 *)val) | BIT(7))); + break; + } + case HW_VAR_H2C_FW_PWRMODE:{ + u8 psmode = (*(u8 *) val); + + if ((psmode != FW_PS_ACTIVE_MODE) && + (!IS_92C_SERIAL(rtlhal->version))) + rtl92c_dm_rf_saving(hw, true); + rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val)); + break; + } + case HW_VAR_FW_PSMODE_STATUS: + ppsc->fw_current_inpsmode = *((bool *) val); + break; + case HW_VAR_H2C_FW_JOINBSSRPT:{ + u8 mstatus = (*(u8 *) val); + u8 tmp_reg422; + bool recover = false; + + if (mstatus == RT_MEDIA_CONNECT) { + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AID, NULL); + rtl_write_byte(rtlpriv, REG_CR + 1, 0x03); + _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3)); + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); + tmp_reg422 = rtl_read_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2); + if (tmp_reg422 & BIT(6)) + recover = true; + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp_reg422 & (~BIT(6))); + rtl92c_set_fw_rsvdpagepkt(hw, 0); + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0); + _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); + if (recover) + rtl_write_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2, + tmp_reg422 | BIT(6)); + rtl_write_byte(rtlpriv, REG_CR + 1, 0x02); + } + rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val)); + break; + } + case HW_VAR_AID:{ + u16 u2btmp; + + u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); + u2btmp &= 0xC000; + rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, + (u2btmp | mac->assoc_id)); + break; + } + case HW_VAR_CORRECT_TSF:{ + u8 btype_ibss = ((u8 *) (val))[0]; + + if (btype_ibss == true) + _rtl92cu_stop_tx_beacon(hw); + _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(3)); + rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(mac->tsf & + 0xffffffff)); + rtl_write_dword(rtlpriv, REG_TSFTR + 4, + (u32)((mac->tsf >> 32) & 0xffffffff)); + _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0); + if (btype_ibss == true) + _rtl92cu_resume_tx_beacon(hw); + break; + } + case HW_VAR_MGT_FILTER: + rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val); + break; + case HW_VAR_CTRL_FILTER: + rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val); + break; + case HW_VAR_DATA_FILTER: + rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case " + "not process\n")); + break; + } +} + +void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u32 ratr_value = (u32) mac->basic_rates; + u8 *mcsrate = mac->mcs; + u8 ratr_index = 0; + u8 nmode = mac->ht_enable; + u8 mimo_ps = 1; + u16 shortgi_rate = 0; + u32 tmp_ratr_value = 0; + u8 curtxbw_40mhz = mac->bw_40; + u8 curshortgi_40mhz = mac->sgi_40; + u8 curshortgi_20mhz = mac->sgi_20; + enum wireless_mode wirelessmode = mac->mode; + + ratr_value |= ((*(u16 *) (mcsrate))) << 12; + switch (wirelessmode) { + case WIRELESS_MODE_B: + if (ratr_value & 0x0000000c) + ratr_value &= 0x0000000d; + else + ratr_value &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_value &= 0x00000FF5; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + nmode = 1; + if (mimo_ps == 0) { + ratr_value &= 0x0007F005; + } else { + u32 ratr_mask; + + if (get_rf_type(rtlphy) == RF_1T2R || + get_rf_type(rtlphy) == RF_1T1R) + ratr_mask = 0x000ff005; + else + ratr_mask = 0x0f0ff005; + if (curtxbw_40mhz) + ratr_mask |= 0x00000010; + ratr_value &= ratr_mask; + } + break; + default: + if (rtlphy->rf_type == RF_1T2R) + ratr_value 
&= 0x000ff0ff; + else + ratr_value &= 0x0f0ff0ff; + break; + } + ratr_value &= 0x0FFFFFFF; + if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz))) { + ratr_value |= 0x10000000; + tmp_ratr_value = (ratr_value >> 12); + for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) { + if ((1 << shortgi_rate) & tmp_ratr_value) + break; + } + shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) | + (shortgi_rate << 4) | (shortgi_rate); + } + rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("%x\n", rtl_read_dword(rtlpriv, + REG_ARFR0))); +} + +void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u32 ratr_bitmap = (u32) mac->basic_rates; + u8 *p_mcsrate = mac->mcs; + u8 ratr_index = 0; + u8 curtxbw_40mhz = mac->bw_40; + u8 curshortgi_40mhz = mac->sgi_40; + u8 curshortgi_20mhz = mac->sgi_20; + enum wireless_mode wirelessmode = mac->mode; + bool shortgi = false; + u8 rate_mask[5]; + u8 macid = 0; + u8 mimops = 1; + + ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12); + switch (wirelessmode) { + case WIRELESS_MODE_B: + ratr_index = RATR_INX_WIRELESS_B; + if (ratr_bitmap & 0x0000000c) + ratr_bitmap &= 0x0000000d; + else + ratr_bitmap &= 0x0000000f; + break; + case WIRELESS_MODE_G: + ratr_index = RATR_INX_WIRELESS_GB; + if (rssi_level == 1) + ratr_bitmap &= 0x00000f00; + else if (rssi_level == 2) + ratr_bitmap &= 0x00000ff0; + else + ratr_bitmap &= 0x00000ff5; + break; + case WIRELESS_MODE_A: + ratr_index = RATR_INX_WIRELESS_A; + ratr_bitmap &= 0x00000ff0; + break; + case WIRELESS_MODE_N_24G: + case WIRELESS_MODE_N_5G: + ratr_index = RATR_INX_WIRELESS_NGB; + if (mimops == 0) { + if (rssi_level == 1) + ratr_bitmap &= 0x00070000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0007f000; + else + ratr_bitmap &= 0x0007f005; + } else { + if (rtlphy->rf_type == RF_1T2R || + rtlphy->rf_type == RF_1T1R) { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x000f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x000ff000; + else + ratr_bitmap &= 0x000ff005; + } + } else { + if (curtxbw_40mhz) { + if (rssi_level == 1) + ratr_bitmap &= 0x0f0f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f0ff000; + else + ratr_bitmap &= 0x0f0ff015; + } else { + if (rssi_level == 1) + ratr_bitmap &= 0x0f0f0000; + else if (rssi_level == 2) + ratr_bitmap &= 0x0f0ff000; + else + ratr_bitmap &= 0x0f0ff005; + } + } + } + if ((curtxbw_40mhz && curshortgi_40mhz) || + (!curtxbw_40mhz && curshortgi_20mhz)) { + if (macid == 0) + shortgi = true; + else if (macid == 1) + shortgi = false; + } + break; + default: + ratr_index = RATR_INX_WIRELESS_NGB; + if (rtlphy->rf_type == RF_1T2R) + ratr_bitmap &= 0x000ff0ff; + else + ratr_bitmap &= 0x0f0ff0ff; + break; + } + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("ratr_bitmap :%x\n", + ratr_bitmap)); + *(u32 *)&rate_mask = ((ratr_bitmap & 0x0fffffff) | + ratr_index << 28); + rate_mask[4] = macid | (shortgi ? 
0x20 : 0x00) | 0x80; + RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, " + "ratr_val:%x, %x:%x:%x:%x:%x\n", + ratr_index, ratr_bitmap, + rate_mask[0], rate_mask[1], + rate_mask[2], rate_mask[3], + rate_mask[4])); + rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask); +} + +void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 sifs_timer; + + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, + (u8 *)&mac->slot_time); + if (!mac->ht_enable) + sifs_timer = 0x0a0a; + else + sifs_timer = 0x0e0e; + rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); +} + +bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate; + u8 u1tmp = 0; + bool actuallyset = false; + unsigned long flag = 0; + /* to do - usb autosuspend */ + u8 usb_autosuspend = 0; + + if (ppsc->swrf_processing) + return false; + spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); + if (ppsc->rfchange_inprogress) { + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + return false; + } else { + ppsc->rfchange_inprogress = true; + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + } + cur_rfstate = ppsc->rfpwr_state; + if (usb_autosuspend) { + /* to do................... */ + } else { + if (ppsc->pwrdown_mode) { + u1tmp = rtl_read_byte(rtlpriv, REG_HSISR); + e_rfpowerstate_toset = (u1tmp & BIT(7)) ? + ERFOFF : ERFON; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + ("pwrdown, 0x5c(BIT7)=%02x\n", u1tmp)); + } else { + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, + rtl_read_byte(rtlpriv, + REG_MAC_PINMUX_CFG) & ~(BIT(3))); + u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL); + e_rfpowerstate_toset = (u1tmp & BIT(3)) ? + ERFON : ERFOFF; + RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, + ("GPIO_IN=%02x\n", u1tmp)); + } + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("N-SS RF =%x\n", + e_rfpowerstate_toset)); + } + if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW " + "Radio ON, RF ON\n")); + ppsc->hwradiooff = false; + actuallyset = true; + } else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset == + ERFOFF)) { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("GPIOChangeRF - HW" + " Radio OFF\n")); + ppsc->hwradiooff = true; + actuallyset = true; + } else { + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD , + ("pHalData->bHwRadioOff and eRfPowerStateToSet do not" + " match: pHalData->bHwRadioOff %x, eRfPowerStateToSet " + "%x\n", ppsc->hwradiooff, e_rfpowerstate_toset)); + } + if (actuallyset) { + ppsc->hwradiooff = 1; + if (e_rfpowerstate_toset == ERFON) { + if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && + RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) + RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM); + else if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3) + && RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3)) + RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3); + } + spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); + ppsc->rfchange_inprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + /* For power down module, we need to enable register block + * contrl reg at 0x1c. Then enable power down control bit + * of register 0x04 BIT4 and BIT15 as 1. 
+ */ + if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) { + /* Enable register area 0x0-0xc. */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0); + if (IS_HARDWARE_TYPE_8723U(rtlhal)) { + /* + * We should configure HW PDn source for WiFi + * ONLY, and then our HW will be set in + * power-down mode if PDn source from all + * functions are configured. + */ + u1tmp = rtl_read_byte(rtlpriv, + REG_MULTI_FUNC_CTRL); + rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, + (u1tmp|WL_HWPDN_EN)); + } else { + rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812); + } + } + if (e_rfpowerstate_toset == ERFOFF) { + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM); + else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3) + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3); + } + } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) { + /* Enter D3 or ASPM after GPIO had been done. */ + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM); + else if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_PCI_D3) + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_PCI_D3); + spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); + ppsc->rfchange_inprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + } else { + spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag); + ppsc->rfchange_inprogress = false; + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag); + } + *valid = 1; + return !ppsc->hwradiooff; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h new file mode 100644 index 000000000000..62af555bb61c --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h @@ -0,0 +1,116 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#ifndef __RTL92CU_HW_H__ +#define __RTL92CU_HW_H__ + +#define H2C_RA_MASK 6 + +#define LLT_POLLING_LLT_THRESHOLD 20 +#define LLT_POLLING_READY_TIMEOUT_COUNT 100 +#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255 + +#define RX_PAGE_SIZE_REG_VALUE PBP_128 +/* Note: We will divide number of page equally for each queue + * other than public queue! 
*/ +#define TX_TOTAL_PAGE_NUMBER 0xF8 +#define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1) + + +#define CHIP_B_PAGE_NUM_PUBQ 0xE7 + +/* For Test Chip Setting + * (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */ +#define CHIP_A_PAGE_NUM_PUBQ 0x7E + + +/* For Chip A Setting */ +#define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER 0xF5 +#define WMM_CHIP_A_TX_PAGE_BOUNDARY \ + (WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */ + +#define WMM_CHIP_A_PAGE_NUM_PUBQ 0xA3 +#define WMM_CHIP_A_PAGE_NUM_HPQ 0x29 +#define WMM_CHIP_A_PAGE_NUM_LPQ 0x29 + + + +/* Note: For Chip B Setting ,modify later */ +#define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER 0xF5 +#define WMM_CHIP_B_TX_PAGE_BOUNDARY \ + (WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER + 1) /* F6 */ + +#define WMM_CHIP_B_PAGE_NUM_PUBQ 0xB0 +#define WMM_CHIP_B_PAGE_NUM_HPQ 0x29 +#define WMM_CHIP_B_PAGE_NUM_LPQ 0x1C +#define WMM_CHIP_B_PAGE_NUM_NPQ 0x1C + +#define BOARD_TYPE_NORMAL_MASK 0xE0 +#define BOARD_TYPE_TEST_MASK 0x0F + +/* should be renamed and moved to another file */ +enum _BOARD_TYPE_8192CUSB { + BOARD_USB_DONGLE = 0, /* USB dongle */ + BOARD_USB_High_PA = 1, /* USB dongle - high power PA */ + BOARD_MINICARD = 2, /* Minicard */ + BOARD_USB_SOLO = 3, /* USB solo-Slim module */ + BOARD_USB_COMBO = 4, /* USB Combo-Slim module */ +}; + +#define IS_HIGHT_PA(boardtype) \ + ((boardtype == BOARD_USB_High_PA) ? true : false) + +#define RTL92C_DRIVER_INFO_SIZE 4 +void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw); +void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw); +int rtl92cu_hw_init(struct ieee80211_hw *hw); +void rtl92cu_card_disable(struct ieee80211_hw *hw); +int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type); +void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw); +void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw); +void rtl92cu_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr); +void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw); +void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level); + +void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw); +bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid); +void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); +u8 _rtl92c_get_chnl_group(u8 chnl); +int rtl92c_download_fw(struct ieee80211_hw *hw); +void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); +void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished); +void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); +void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, + u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); +bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c new file mode 100644 index 000000000000..332c74348a69 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.c @@ -0,0 +1,142 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../usb.h" +#include "reg.h" +#include "led.h" + +static void _rtl92cu_init_led(struct ieee80211_hw *hw, + struct rtl_led *pled, enum rtl_led_pin ledpin) +{ + pled->hw = hw; + pled->ledpin = ledpin; + pled->ledon = false; +} + +static void _rtl92cu_deInit_led(struct rtl_led *pled) +{ +} + +void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + u8 ledcfg; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin)); + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + rtl_write_byte(rtlpriv, + REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6)); + break; + case LED_PIN_LED1: + rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5)); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + } + pled->ledon = true; +} + +void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw); + u8 ledcfg; + + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, + ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin)); + ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2); + switch (pled->ledpin) { + case LED_PIN_GPIO0: + break; + case LED_PIN_LED0: + ledcfg &= 0xf0; + if (usbpriv->ledctl.led_opendrain == true) + rtl_write_byte(rtlpriv, REG_LEDCFG2, + (ledcfg | BIT(1) | BIT(5) | BIT(6))); + else + rtl_write_byte(rtlpriv, REG_LEDCFG2, + (ledcfg | BIT(3) | BIT(5) | BIT(6))); + break; + case LED_PIN_LED1: + ledcfg &= 0x0f; + rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3))); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + } + pled->ledon = false; +} + +void rtl92cu_init_sw_leds(struct ieee80211_hw *hw) +{ + struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw); + _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0); + _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1); +} + +void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw) +{ + struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw); + _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0)); + _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1)); +} + +static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ +} + +void rtl92cu_led_control(struct ieee80211_hw *hw, + enum led_ctl_mode ledaction) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + + if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) && + (ledaction == LED_CTL_TX || + ledaction == LED_CTL_RX || + ledaction == 
LED_CTL_SITE_SURVEY || + ledaction == LED_CTL_LINK || + ledaction == LED_CTL_NO_LINK || + ledaction == LED_CTL_START_TO_LINK || + ledaction == LED_CTL_POWER_ON)) { + return; + } + RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n", + ledaction)); + _rtl92cu_sw_led_control(hw, ledaction); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/led.h b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h new file mode 100644 index 000000000000..decaee4d1eb1 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/led.h @@ -0,0 +1,37 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + *****************************************************************************/ + +#ifndef __RTL92CU_LED_H__ +#define __RTL92CU_LED_H__ + +void rtl92cu_init_sw_leds(struct ieee80211_hw *hw); +void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw); +void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled); +void rtl92cu_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c new file mode 100644 index 000000000000..f8514cba17b6 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c @@ -0,0 +1,1144 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * +****************************************************************************/ +#include <linux/module.h> + +#include "../wifi.h" +#include "../pci.h" +#include "../usb.h" +#include "../ps.h" +#include "../cam.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" +#include "mac.h" +#include "trx.h" + +/* macro to shorten lines */ + +#define LINK_Q ui_link_quality +#define RX_EVM rx_evm_percentage +#define RX_SIGQ rx_mimo_signalquality + + +void rtl92c_read_chip_version(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + enum version_8192c chip_version = VERSION_UNKNOWN; + u32 value32; + + value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG); + if (value32 & TRP_VAUX_EN) { + chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C : + VERSION_TEST_CHIP_88C; + } else { + /* Normal mass production chip. */ + chip_version = NORMAL_CHIP; + chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0); + chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0); + /* RTL8723 with BT function. */ + chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0); + if (IS_VENDOR_UMC(chip_version)) + chip_version |= ((value32 & CHIP_VER_RTL_MASK) ? + CHIP_VENDOR_UMC_B_CUT : 0); + if (IS_92C_SERIAL(chip_version)) { + value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM); + chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) == + CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0); + } else if (IS_8723_SERIES(chip_version)) { + value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS); + chip_version |= ((value32 & RF_RL_ID) ? + CHIP_8723_DRV_REV : 0); + } + } + rtlhal->version = (enum version_8192c)chip_version; + switch (rtlhal->version) { + case VERSION_NORMAL_TSMC_CHIP_92C_1T2R: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_B_CHIP_92C.\n")); + break; + case VERSION_NORMAL_TSMC_CHIP_92C: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n")); + break; + case VERSION_NORMAL_TSMC_CHIP_88C: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n")); + break; + case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_i" + "92C_1T2R_A_CUT.\n")); + break; + case VERSION_NORMAL_UMC_CHIP_92C_A_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_" + "92C_A_CUT.\n")); + break; + case VERSION_NORMAL_UMC_CHIP_88C_A_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMAL_UMC_CHIP" + "_88C_A_CUT.\n")); + break; + case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMAL_UMC_CHIP" + "_92C_1T2R_B_CUT.\n")); + break; + case VERSION_NORMAL_UMC_CHIP_92C_B_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMAL_UMC_CHIP" + "_92C_B_CUT.\n")); + break; + case VERSION_NORMAL_UMC_CHIP_88C_B_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMAL_UMC_CHIP" + "_88C_B_CUT.\n")); + break; + case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMA_UMC_CHIP" + "_8723_1T1R_A_CUT.\n")); + break; + case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_NORMA_UMC_CHIP" + 
"_8723_1T1R_B_CUT.\n")); + break; + case VERSION_TEST_CHIP_92C: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_TEST_CHIP_92C.\n")); + break; + case VERSION_TEST_CHIP_88C: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: VERSION_TEST_CHIP_88C.\n")); + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Chip Version ID: ???????????????.\n")); + break; + } + if (IS_92C_SERIAL(rtlhal->version)) + rtlphy->rf_type = + (IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R; + else + rtlphy->rf_type = RF_1T1R; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ? + "RF_2T2R" : "RF_1T1R")); + if (get_rf_type(rtlphy) == RF_1T1R) + rtlpriv->dm.rfpath_rxenable[0] = true; + else + rtlpriv->dm.rfpath_rxenable[0] = + rtlpriv->dm.rfpath_rxenable[1] = true; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n", + rtlhal->version)); +} + +/** + * writeLLT - LLT table write access + * @io: io callback + * @address: LLT logical address. + * @data: LLT data content + * + * Realtek hardware access function. + * + */ +bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + bool status = true; + long count = 0; + u32 value = _LLT_INIT_ADDR(address) | + _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS); + + rtl_write_dword(rtlpriv, REG_LLT_INIT, value); + do { + value = rtl_read_dword(rtlpriv, REG_LLT_INIT); + if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value)) + break; + if (count > POLLING_LLT_THRESHOLD) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Failed to polling write LLT done at" + " address %d! _LLT_OP_VALUE(%x)\n", + address, _LLT_OP_VALUE(value))); + status = false; + break; + } + } while (++count); + return status; +} +/** + * rtl92c_init_LLT_table - Init LLT table + * @io: io callback + * @boundary: + * + * Realtek hardware access function. + * + */ +bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary) +{ + bool rst = true; + u32 i; + + for (i = 0; i < (boundary - 1); i++) { + rst = rtl92c_llt_write(hw, i , i + 1); + if (true != rst) { + printk(KERN_ERR "===> %s #1 fail\n", __func__); + return rst; + } + } + /* end of list */ + rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF); + if (true != rst) { + printk(KERN_ERR "===> %s #2 fail\n", __func__); + return rst; + } + /* Make the other pages as ring buffer + * This ring buffer is used as beacon buffer if we config this MAC + * as two MAC transfer. + * Otherwise used as local loopback buffer. 
+ */ + for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) { + rst = rtl92c_llt_write(hw, i, (i + 1)); + if (true != rst) { + printk(KERN_ERR "===> %s #3 fail\n", __func__); + return rst; + } + } + /* Let last entry point to the start entry of ring buffer */ + rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary); + if (true != rst) { + printk(KERN_ERR "===> %s #4 fail\n", __func__); + return rst; + } + return rst; +} +void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 *macaddr = p_macaddr; + u32 entry_id = 0; + bool is_pairwise = false; + static u8 cam_const_addr[4][6] = { + {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x01}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x02}, + {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} + }; + static u8 cam_const_broad[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + + if (clear_all) { + u8 idx = 0; + u8 cam_offset = 0; + u8 clear_number = 5; + + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n")); + for (idx = 0; idx < clear_number; idx++) { + rtl_cam_mark_invalid(hw, cam_offset + idx); + rtl_cam_empty_entry(hw, cam_offset + idx); + if (idx < 5) { + memset(rtlpriv->sec.key_buf[idx], 0, + MAX_KEY_LEN); + rtlpriv->sec.key_len[idx] = 0; + } + } + } else { + switch (enc_algo) { + case WEP40_ENCRYPTION: + enc_algo = CAM_WEP40; + break; + case WEP104_ENCRYPTION: + enc_algo = CAM_WEP104; + break; + case TKIP_ENCRYPTION: + enc_algo = CAM_TKIP; + break; + case AESCCMP_ENCRYPTION: + enc_algo = CAM_AES; + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("iillegal switch case\n")); + enc_algo = CAM_TKIP; + break; + } + if (is_wepkey || rtlpriv->sec.use_defaultkey) { + macaddr = cam_const_addr[key_index]; + entry_id = key_index; + } else { + if (is_group) { + macaddr = cam_const_broad; + entry_id = key_index; + } else { + key_index = PAIRWISE_KEYIDX; + entry_id = CAM_PAIRWISE_KEY_POSITION; + is_pairwise = true; + } + } + if (rtlpriv->sec.key_len[key_index] == 0) { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + ("delete one entry\n")); + rtl_cam_delete_one_entry(hw, p_macaddr, entry_id); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + ("The insert KEY length is %d\n", + rtlpriv->sec.key_len[PAIRWISE_KEYIDX])); + RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, + ("The insert KEY is %x %x\n", + rtlpriv->sec.key_buf[0][0], + rtlpriv->sec.key_buf[0][1])); + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + ("add one entry\n")); + if (is_pairwise) { + RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD, + "Pairwiase Key content :", + rtlpriv->sec.pairwise_key, + rtlpriv->sec. + key_len[PAIRWISE_KEYIDX]); + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + ("set Pairwiase key\n")); + + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec. 
+ key_buf[key_index]); + } else { + RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, + ("set group key\n")); + if (mac->opmode == NL80211_IFTYPE_ADHOC) { + rtl_cam_add_one_entry(hw, + rtlefuse->dev_addr, + PAIRWISE_KEYIDX, + CAM_PAIRWISE_KEY_POSITION, + enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf + [entry_id]); + } + rtl_cam_add_one_entry(hw, macaddr, key_index, + entry_id, enc_algo, + CAM_CONFIG_NO_USEDK, + rtlpriv->sec.key_buf[entry_id]); + } + } + } +} + +u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + return rtl_read_dword(rtlpriv, REG_TXDMA_STATUS); +} + +void rtl92c_enable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + if (IS_HARDWARE_TYPE_8192CE(rtlhal)) { + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & + 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & + 0xFFFFFFFF); + rtlpci->irq_enabled = true; + } else { + rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] & + 0xFFFFFFFF); + rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] & + 0xFFFFFFFF); + rtlusb->irq_enabled = true; + } +} + +void rtl92c_init_interrupt(struct ieee80211_hw *hw) +{ + rtl92c_enable_interrupt(hw); +} + +void rtl92c_disable_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED); + rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED); + if (IS_HARDWARE_TYPE_8192CE(rtlhal)) + rtlpci->irq_enabled = false; + else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) + rtlusb->irq_enabled = false; +} + +void rtl92c_set_qos(struct ieee80211_hw *hw, int aci) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u32 u4b_ac_param; + + rtl92c_dm_init_edca_turbo(hw); + u4b_ac_param = (u32) mac->ac[aci].aifs; + u4b_ac_param |= + ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) << + AC_PARAM_ECW_MIN_OFFSET; + u4b_ac_param |= + ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) << + AC_PARAM_ECW_MAX_OFFSET; + u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) << + AC_PARAM_TXOP_OFFSET; + RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, + ("queue:%x, ac_param:%x\n", aci, u4b_ac_param)); + switch (aci) { + case AC1_BK: + rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param); + break; + case AC0_BE: + rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param); + break; + case AC2_VI: + rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param); + break; + case AC3_VO: + rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param); + break; + default: + RT_ASSERT(false, ("invalid aci: %d !\n", aci)); + break; + } +} + +/*------------------------------------------------------------------------- + * HW MAC Address + *-------------------------------------------------------------------------*/ +void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr) +{ + u32 i; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + for (i = 0 ; i < ETH_ALEN ; i++) + rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i)); + + RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-" + "%02X-%02X-%02X\n", + rtl_read_byte(rtlpriv, REG_MACID), + rtl_read_byte(rtlpriv, REG_MACID+1), + 
rtl_read_byte(rtlpriv, REG_MACID+2), + rtl_read_byte(rtlpriv, REG_MACID+3), + rtl_read_byte(rtlpriv, REG_MACID+4), + rtl_read_byte(rtlpriv, REG_MACID+5))); +} + +void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size); +} + +int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) +{ + u8 value; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + switch (type) { + case NL80211_IFTYPE_UNSPECIFIED: + value = NT_NO_LINK; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Set Network type to NO LINK!\n")); + break; + case NL80211_IFTYPE_ADHOC: + value = NT_LINK_AD_HOC; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Set Network type to Ad Hoc!\n")); + break; + case NL80211_IFTYPE_STATION: + value = NT_LINK_AP; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Set Network type to STA!\n")); + break; + case NL80211_IFTYPE_AP: + value = NT_AS_AP; + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Set Network type to AP!\n")); + break; + default: + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Network type %d not support!\n", type)); + return -EOPNOTSUPP; + } + rtl_write_byte(rtlpriv, (REG_CR + 2), value); + return 0; +} + +void rtl92c_init_network_type(struct ieee80211_hw *hw) +{ + rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED); +} + +void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw) +{ + u16 value16; + u32 value32; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* Response Rate Set */ + value32 = rtl_read_dword(rtlpriv, REG_RRSR); + value32 &= ~RATE_BITMAP_ALL; + value32 |= RATE_RRSR_CCK_ONLY_1M; + rtl_write_dword(rtlpriv, REG_RRSR, value32); + /* SIFS (used in NAV) */ + value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10); + rtl_write_word(rtlpriv, REG_SPEC_SIFS, value16); + /* Retry Limit */ + value16 = _LRL(0x30) | _SRL(0x30); + rtl_write_dword(rtlpriv, REG_RL, value16); +} + +void rtl92c_init_rate_fallback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* Set Data Auto Rate Fallback Retry Count register. */ + rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000); + rtl_write_dword(rtlpriv, REG_DARFRC+4, 0x10080404); + rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201); + rtl_write_dword(rtlpriv, REG_RARFRC+4, 0x08070605); +} + +static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs, + u8 ctx_sifs) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_SIFS_CCK, trx_sifs); + rtl_write_byte(rtlpriv, (REG_SIFS_CCK + 1), ctx_sifs); +} + +static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs, + u8 ctx_sifs) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_SIFS_OFDM, trx_sifs); + rtl_write_byte(rtlpriv, (REG_SIFS_OFDM + 1), ctx_sifs); +} + +void rtl92c_init_edca_param(struct ieee80211_hw *hw, + u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs) +{ + /* sequence: VO, VI, BE, BK ==> the same as 92C hardware design. + * referenc : enum nl80211_txq_q or ieee80211_set_wmm_default function. + */ + u32 value; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + value = (u32)aifs; + value |= ((u32)cw_min & 0xF) << 8; + value |= ((u32)cw_max & 0xF) << 12; + value |= (u32)txop << 16; + /* 92C hardware register sequence is the same as queue number. 
*/ + rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value); +} + +void rtl92c_init_edca(struct ieee80211_hw *hw) +{ + u16 value16; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* disable EDCCA count down, to reduce collison and retry */ + value16 = rtl_read_word(rtlpriv, REG_RD_CTRL); + value16 |= DIS_EDCA_CNT_DWN; + rtl_write_word(rtlpriv, REG_RD_CTRL, value16); + /* Update SIFS timing. ?????????? + * pHalData->SifsTime = 0x0e0e0a0a; */ + rtl92c_set_cck_sifs(hw, 0xa, 0xa); + rtl92c_set_ofdm_sifs(hw, 0xe, 0xe); + /* Set CCK/OFDM SIFS to be 10us. */ + rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a); + rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010); + rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204); + rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004); + /* TXOP */ + rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B); + rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F); + rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324); + rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226); + /* PIFS */ + rtl_write_byte(rtlpriv, REG_PIFS, 0x1C); + /* AGGR BREAK TIME Register */ + rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); + rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040); + rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02); + rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02); +} + +void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631); + rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); + /* init AMPDU aggregation number, tuning for Tx's TP, */ + rtl_write_word(rtlpriv, 0x4CA, 0x0708); +} + +void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF); +} + +void rtl92c_init_rdg_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF); + rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200); + rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05); +} + +void rtl92c_init_retry_function(struct ieee80211_hw *hw) +{ + u8 value8; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL); + value8 |= EN_AMPDU_RTY_NEW; + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8); + /* Set ACK timeout */ + rtl_write_byte(rtlpriv, REG_ACKTO, 0x40); +} + +void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw, + enum version_8192c version) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */ + rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/ + rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME); + if (IS_NORMAL_CHIP(rtlhal->version)) + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F); + else + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF); +} + +void rtl92c_disable_fast_edca(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0); +} + +void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 value = is2T ? 
MAX_MSS_DENSITY_2T : MAX_MSS_DENSITY_1T; + + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value); +} + +u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + return rtl_read_word(rtlpriv, REG_RXFLTMAP0); +} + +void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter); +} + +u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + return rtl_read_word(rtlpriv, REG_RXFLTMAP1); +} + +void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter); +} + +u16 rtl92c_get_data_filter(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + return rtl_read_word(rtlpriv, REG_RXFLTMAP2); +} + +void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter); +} +/*==============================================================*/ + +static u8 _rtl92c_query_rxpwrpercentage(char antpower) +{ + if ((antpower <= -100) || (antpower >= 20)) + return 0; + else if (antpower >= 0) + return 100; + else + return 100 + antpower; +} + +static u8 _rtl92c_evm_db_to_percentage(char value) +{ + char ret_val; + + ret_val = value; + if (ret_val >= 0) + ret_val = 0; + if (ret_val <= -33) + ret_val = -33; + ret_val = 0 - ret_val; + ret_val *= 3; + if (ret_val == 99) + ret_val = 100; + return ret_val; +} + +static long _rtl92c_translate_todbm(struct ieee80211_hw *hw, + u8 signal_strength_index) +{ + long signal_power; + + signal_power = (long)((signal_strength_index + 1) >> 1); + signal_power -= 95; + return signal_power; +} + +static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw, + long currsig) +{ + long retsig; + + if (currsig >= 61 && currsig <= 100) + retsig = 90 + ((currsig - 60) / 4); + else if (currsig >= 41 && currsig <= 60) + retsig = 78 + ((currsig - 40) / 2); + else if (currsig >= 31 && currsig <= 40) + retsig = 66 + (currsig - 30); + else if (currsig >= 21 && currsig <= 30) + retsig = 54 + (currsig - 20); + else if (currsig >= 5 && currsig <= 20) + retsig = 42 + (((currsig - 5) * 2) / 3); + else if (currsig == 4) + retsig = 36; + else if (currsig == 3) + retsig = 27; + else if (currsig == 2) + retsig = 18; + else if (currsig == 1) + retsig = 9; + else + retsig = currsig; + return retsig; +} + +static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw, + struct rtl_stats *pstats, + struct rx_desc_92c *pdesc, + struct rx_fwinfo_92c *p_drvinfo, + bool packet_match_bssid, + bool packet_toself, + bool packet_beacon) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct phy_sts_cck_8192s_t *cck_buf; + s8 rx_pwr_all = 0, rx_pwr[4]; + u8 rf_rx_num = 0, evm, pwdb_all; + u8 i, max_spatial_stream; + u32 rssi, total_rssi = 0; + bool in_powersavemode = false; + bool is_cck_rate; + + is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc); + pstats->packet_matchbssid = packet_match_bssid; + pstats->packet_toself = packet_toself; + pstats->is_cck = is_cck_rate; + pstats->packet_beacon = packet_beacon; + pstats->is_cck = is_cck_rate; + pstats->RX_SIGQ[0] = -1; + pstats->RX_SIGQ[1] = -1; + if (is_cck_rate) { + u8 report, cck_highpwr; + cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; + if (!in_powersavemode) + cck_highpwr = rtlphy->cck_high_power; + else + cck_highpwr = false; + 
if (!cck_highpwr) { + u8 cck_agc_rpt = cck_buf->cck_agc_rpt; + report = cck_buf->cck_agc_rpt & 0xc0; + report = report >> 6; + switch (report) { + case 0x3: + rx_pwr_all = -46 - (cck_agc_rpt & 0x3e); + break; + case 0x2: + rx_pwr_all = -26 - (cck_agc_rpt & 0x3e); + break; + case 0x1: + rx_pwr_all = -12 - (cck_agc_rpt & 0x3e); + break; + case 0x0: + rx_pwr_all = 16 - (cck_agc_rpt & 0x3e); + break; + } + } else { + u8 cck_agc_rpt = cck_buf->cck_agc_rpt; + report = p_drvinfo->cfosho[0] & 0x60; + report = report >> 5; + switch (report) { + case 0x3: + rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1); + break; + case 0x2: + rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1); + break; + case 0x1: + rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1); + break; + case 0x0: + rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1); + break; + } + } + pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all); + pstats->rx_pwdb_all = pwdb_all; + pstats->recvsignalpower = rx_pwr_all; + if (packet_match_bssid) { + u8 sq; + if (pstats->rx_pwdb_all > 40) + sq = 100; + else { + sq = cck_buf->sq_rpt; + if (sq > 64) + sq = 0; + else if (sq < 20) + sq = 100; + else + sq = ((64 - sq) * 100) / 44; + } + pstats->signalquality = sq; + pstats->RX_SIGQ[0] = sq; + pstats->RX_SIGQ[1] = -1; + } + } else { + rtlpriv->dm.rfpath_rxenable[0] = + rtlpriv->dm.rfpath_rxenable[1] = true; + for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) { + if (rtlpriv->dm.rfpath_rxenable[i]) + rf_rx_num++; + rx_pwr[i] = + ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110; + rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]); + total_rssi += rssi; + rtlpriv->stats.rx_snr_db[i] = + (long)(p_drvinfo->rxsnr[i] / 2); + + if (packet_match_bssid) + pstats->rx_mimo_signalstrength[i] = (u8) rssi; + } + rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110; + pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all); + pstats->rx_pwdb_all = pwdb_all; + pstats->rxpower = rx_pwr_all; + pstats->recvsignalpower = rx_pwr_all; + if (GET_RX_DESC_RX_MCS(pdesc) && + GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 && + GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15) + max_spatial_stream = 2; + else + max_spatial_stream = 1; + for (i = 0; i < max_spatial_stream; i++) { + evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]); + if (packet_match_bssid) { + if (i == 0) + pstats->signalquality = + (u8) (evm & 0xff); + pstats->RX_SIGQ[i] = + (u8) (evm & 0xff); + } + } + } + if (is_cck_rate) + pstats->signalstrength = + (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all)); + else if (rf_rx_num != 0) + pstats->signalstrength = + (u8) (_rtl92c_signal_scale_mapping + (hw, total_rssi /= rf_rx_num)); +} + +static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u8 rfpath; + u32 last_rssi, tmpval; + + if (pstats->packet_toself || pstats->packet_beacon) { + rtlpriv->stats.rssi_calculate_cnt++; + if (rtlpriv->stats.ui_rssi.total_num++ >= + PHY_RSSI_SLID_WIN_MAX) { + rtlpriv->stats.ui_rssi.total_num = + PHY_RSSI_SLID_WIN_MAX; + last_rssi = + rtlpriv->stats.ui_rssi.elements[rtlpriv-> + stats.ui_rssi.index]; + rtlpriv->stats.ui_rssi.total_val -= last_rssi; + } + rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength; + rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi. 
+ index++] = pstats->signalstrength; + if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX) + rtlpriv->stats.ui_rssi.index = 0; + tmpval = rtlpriv->stats.ui_rssi.total_val / + rtlpriv->stats.ui_rssi.total_num; + rtlpriv->stats.signal_strength = + _rtl92c_translate_todbm(hw, (u8) tmpval); + pstats->rssi = rtlpriv->stats.signal_strength; + } + if (!pstats->is_cck && pstats->packet_toself) { + for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; + rfpath++) { + if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath)) + continue; + if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + pstats->rx_mimo_signalstrength[rfpath]; + } + if (pstats->rx_mimo_signalstrength[rfpath] > + rtlpriv->stats.rx_rssi_percentage[rfpath]) { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + ((rtlpriv->stats. + rx_rssi_percentage[rfpath] * + (RX_SMOOTH_FACTOR - 1)) + + (pstats->rx_mimo_signalstrength[rfpath])) / + (RX_SMOOTH_FACTOR); + + rtlpriv->stats.rx_rssi_percentage[rfpath] = + rtlpriv->stats.rx_rssi_percentage[rfpath] + + 1; + } else { + rtlpriv->stats.rx_rssi_percentage[rfpath] = + ((rtlpriv->stats. + rx_rssi_percentage[rfpath] * + (RX_SMOOTH_FACTOR - 1)) + + (pstats->rx_mimo_signalstrength[rfpath])) / + (RX_SMOOTH_FACTOR); + } + } + } +} + +static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + int weighting = 0; + + if (rtlpriv->stats.recv_signal_power == 0) + rtlpriv->stats.recv_signal_power = pstats->recvsignalpower; + if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power) + weighting = 5; + else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power) + weighting = (-5); + rtlpriv->stats.recv_signal_power = + (rtlpriv->stats.recv_signal_power * 5 + + pstats->recvsignalpower + weighting) / 6; +} + +static void _rtl92c_process_pwdb(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + long undecorated_smoothed_pwdb = 0; + + if (mac->opmode == NL80211_IFTYPE_ADHOC) { + return; + } else { + undecorated_smoothed_pwdb = + rtlpriv->dm.undecorated_smoothed_pwdb; + } + if (pstats->packet_toself || pstats->packet_beacon) { + if (undecorated_smoothed_pwdb < 0) + undecorated_smoothed_pwdb = pstats->rx_pwdb_all; + if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) { + undecorated_smoothed_pwdb = + (((undecorated_smoothed_pwdb) * + (RX_SMOOTH_FACTOR - 1)) + + (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); + undecorated_smoothed_pwdb = undecorated_smoothed_pwdb + + 1; + } else { + undecorated_smoothed_pwdb = + (((undecorated_smoothed_pwdb) * + (RX_SMOOTH_FACTOR - 1)) + + (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); + } + rtlpriv->dm.undecorated_smoothed_pwdb = + undecorated_smoothed_pwdb; + _rtl92c_update_rxsignalstatistics(hw, pstats); + } +} + +static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw, + struct rtl_stats *pstats) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 last_evm = 0, n_stream, tmpval; + + if (pstats->signalquality != 0) { + if (pstats->packet_toself || pstats->packet_beacon) { + if (rtlpriv->stats.LINK_Q.total_num++ >= + PHY_LINKQUALITY_SLID_WIN_MAX) { + rtlpriv->stats.LINK_Q.total_num = + PHY_LINKQUALITY_SLID_WIN_MAX; + last_evm = + rtlpriv->stats.LINK_Q.elements + [rtlpriv->stats.LINK_Q.index]; + rtlpriv->stats.LINK_Q.total_val -= + last_evm; + } + rtlpriv->stats.LINK_Q.total_val += + pstats->signalquality; + 
rtlpriv->stats.LINK_Q.elements + [rtlpriv->stats.LINK_Q.index++] = + pstats->signalquality; + if (rtlpriv->stats.LINK_Q.index >= + PHY_LINKQUALITY_SLID_WIN_MAX) + rtlpriv->stats.LINK_Q.index = 0; + tmpval = rtlpriv->stats.LINK_Q.total_val / + rtlpriv->stats.LINK_Q.total_num; + rtlpriv->stats.signal_quality = tmpval; + rtlpriv->stats.last_sigstrength_inpercent = tmpval; + for (n_stream = 0; n_stream < 2; + n_stream++) { + if (pstats->RX_SIGQ[n_stream] != -1) { + if (!rtlpriv->stats.RX_EVM[n_stream]) { + rtlpriv->stats.RX_EVM[n_stream] + = pstats->RX_SIGQ[n_stream]; + } + rtlpriv->stats.RX_EVM[n_stream] = + ((rtlpriv->stats.RX_EVM + [n_stream] * + (RX_SMOOTH_FACTOR - 1)) + + (pstats->RX_SIGQ + [n_stream] * 1)) / + (RX_SMOOTH_FACTOR); + } + } + } + } else { + ; + } +} + +static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw, + u8 *buffer, + struct rtl_stats *pcurrent_stats) +{ + if (!pcurrent_stats->packet_matchbssid && + !pcurrent_stats->packet_beacon) + return; + _rtl92c_process_ui_rssi(hw, pcurrent_stats); + _rtl92c_process_pwdb(hw, pcurrent_stats); + _rtl92c_process_LINK_Q(hw, pcurrent_stats); +} + +void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_stats *pstats, + struct rx_desc_92c *pdesc, + struct rx_fwinfo_92c *p_drvinfo) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct ieee80211_hdr *hdr; + u8 *tmp_buf; + u8 *praddr; + u8 *psaddr; + __le16 fc; + u16 type, cpu_fc; + bool packet_matchbssid, packet_toself, packet_beacon; + + tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift; + hdr = (struct ieee80211_hdr *)tmp_buf; + fc = hdr->frame_control; + cpu_fc = le16_to_cpu(fc); + type = WLAN_FC_GET_TYPE(fc); + praddr = hdr->addr1; + psaddr = hdr->addr2; + packet_matchbssid = + ((IEEE80211_FTYPE_CTL != type) && + (!compare_ether_addr(mac->bssid, + (cpu_fc & IEEE80211_FCTL_TODS) ? + hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ? + hdr->addr2 : hdr->addr3)) && + (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv)); + + packet_toself = packet_matchbssid && + (!compare_ether_addr(praddr, rtlefuse->dev_addr)); + if (ieee80211_is_beacon(fc)) + packet_beacon = true; + _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, + packet_matchbssid, packet_toself, + packet_beacon); + _rtl92c_process_phyinfo(hw, tmp_buf, pstats); +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h new file mode 100644 index 000000000000..298fdb724aa5 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h @@ -0,0 +1,180 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#ifndef __RTL92C_MAC_H__ +#define __RTL92C_MAC_H__ + +#define LLT_LAST_ENTRY_OF_TX_PKT_BUFFER 255 +#define DRIVER_EARLY_INT_TIME 0x05 +#define BCN_DMA_ATIME_INT_TIME 0x02 + +void rtl92c_read_chip_version(struct ieee80211_hw *hw); +bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data); +bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary); +void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); +void rtl92c_enable_interrupt(struct ieee80211_hw *hw); +void rtl92c_disable_interrupt(struct ieee80211_hw *hw); +void rtl92c_set_qos(struct ieee80211_hw *hw, int aci); + + +/*--------------------------------------------------------------- + * Hardware init functions + *---------------------------------------------------------------*/ +void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr); +void rtl92c_init_interrupt(struct ieee80211_hw *hw); +void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size); + +int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type); +void rtl92c_init_network_type(struct ieee80211_hw *hw); +void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw); +void rtl92c_init_rate_fallback(struct ieee80211_hw *hw); + +void rtl92c_init_edca_param(struct ieee80211_hw *hw, + u16 queue, + u16 txop, + u8 ecwmax, + u8 ecwmin, + u8 aifs); + +void rtl92c_init_edca(struct ieee80211_hw *hw); +void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw); +void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode); +void rtl92c_init_rdg_setting(struct ieee80211_hw *hw); +void rtl92c_init_retry_function(struct ieee80211_hw *hw); + +void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw, + enum version_8192c version); + +void rtl92c_disable_fast_edca(struct ieee80211_hw *hw); +void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T); + +/* For filter */ +u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw); +void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter); +u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw); +void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter); +u16 rtl92c_get_data_filter(struct ieee80211_hw *hw); +void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter); + + +u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw); + +#define RX_HAL_IS_CCK_RATE(_pdesc)\ + (GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE1M ||\ + GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE2M ||\ + GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE5_5M ||\ + GET_RX_DESC_RX_MCS(_pdesc) == DESC92C_RATE11M) + +struct rx_fwinfo_92c { + u8 gain_trsw[4]; + u8 pwdb_all; + u8 cfosho[4]; + u8 cfotail[4]; + char rxevm[2]; + char rxsnr[4]; + u8 pdsnr[2]; + u8 csi_current[2]; + u8 csi_target[2]; + u8 sigevm; + u8 max_ex_pwr; + u8 ex_intf_flag:1; + u8 sgi_en:1; + u8 rxsc:2; + u8 reserve:4; +} __packed; + +struct 
rx_desc_92c { + u32 length:14; + u32 crc32:1; + u32 icverror:1; + u32 drv_infosize:4; + u32 security:3; + u32 qos:1; + u32 shift:2; + u32 phystatus:1; + u32 swdec:1; + u32 lastseg:1; + u32 firstseg:1; + u32 eor:1; + u32 own:1; + u32 macid:5; /* word 1 */ + u32 tid:4; + u32 hwrsvd:5; + u32 paggr:1; + u32 faggr:1; + u32 a1_fit:4; + u32 a2_fit:4; + u32 pam:1; + u32 pwr:1; + u32 moredata:1; + u32 morefrag:1; + u32 type:2; + u32 mc:1; + u32 bc:1; + u32 seq:12; /* word 2 */ + u32 frag:4; + u32 nextpktlen:14; + u32 nextind:1; + u32 rsvd:1; + u32 rxmcs:6; /* word 3 */ + u32 rxht:1; + u32 amsdu:1; + u32 splcp:1; + u32 bandwidth:1; + u32 htc:1; + u32 tcpchk_rpt:1; + u32 ipcchk_rpt:1; + u32 tcpchk_valid:1; + u32 hwpcerr:1; + u32 hwpcind:1; + u32 iv0:16; + u32 iv1; /* word 4 */ + u32 tsfl; /* word 5 */ + u32 bufferaddress; /* word 6 */ + u32 bufferaddress64; /* word 7 */ +} __packed; + +enum rtl_desc_qsel rtl92c_map_hwqueue_to_fwqueue(u16 fc, + unsigned int + skb_queue); +void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_stats *pstats, + struct rx_desc_92c *pdesc, + struct rx_fwinfo_92c *p_drvinfo); + +/*--------------------------------------------------------------- + * Card disable functions + *---------------------------------------------------------------*/ + + + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c new file mode 100644 index 000000000000..4e020e654e6b --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.c @@ -0,0 +1,607 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../pci.h" +#include "../ps.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" +#include "table.h" + +u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 original_value, readback_value, bitshift; + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), " + "rfpath(%#x), bitmask(%#x)\n", + regaddr, rfpath, bitmask)); + if (rtlphy->rf_mode != RF_OP_BY_FW) { + original_value = _rtl92c_phy_rf_serial_read(hw, + rfpath, regaddr); + } else { + original_value = _rtl92c_phy_fw_rf_serial_read(hw, + rfpath, regaddr); + } + bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + readback_value = (original_value & bitmask) >> bitshift; + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + ("regaddr(%#x), rfpath(%#x), " + "bitmask(%#x), original_value(%#x)\n", + regaddr, rfpath, bitmask, original_value)); + return readback_value; +} + +void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 original_value, bitshift; + + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, + ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, rfpath)); + if (rtlphy->rf_mode != RF_OP_BY_FW) { + if (bitmask != RFREG_OFFSET_MASK) { + original_value = _rtl92c_phy_rf_serial_read(hw, + rfpath, + regaddr); + bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + data = + ((original_value & (~bitmask)) | + (data << bitshift)); + } + _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data); + } else { + if (bitmask != RFREG_OFFSET_MASK) { + original_value = _rtl92c_phy_fw_rf_serial_read(hw, + rfpath, + regaddr); + bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + data = + ((original_value & (~bitmask)) | + (data << bitshift)); + } + _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data); + } + RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), " + "bitmask(%#x), data(%#x), rfpath(%#x)\n", + regaddr, bitmask, data, rfpath)); +} + +bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw) +{ + bool rtstatus; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + bool is92c = IS_92C_SERIAL(rtlhal->version); + + rtstatus = _rtl92cu_phy_config_mac_with_headerfile(hw); + if (is92c && IS_HARDWARE_TYPE_8192CE(rtlhal)) + rtl_write_byte(rtlpriv, 0x14, 0x71); + return rtstatus; +} + +bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw) +{ + bool rtstatus = true; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u16 regval; + u8 b_reg_hwparafile = 1; + + _rtl92c_phy_init_bb_rf_register_definition(hw); + regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN); + rtl_write_word(rtlpriv, REG_SYS_FUNC_EN, regval | BIT(13) | + BIT(0) | BIT(1)); + rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83); + rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb); + rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB); + if (IS_HARDWARE_TYPE_8192CE(rtlhal)) { + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, FEN_PPLL | FEN_PCIEA | + FEN_DIO_PCIE | FEN_BB_GLB_RSTn | FEN_BBRSTB); + } else if (IS_HARDWARE_TYPE_8192CU(rtlhal)) { + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 
FEN_USBA | FEN_USBD | + FEN_BB_GLB_RSTn | FEN_BBRSTB); + rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f); + } + rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80); + if (b_reg_hwparafile == 1) + rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw); + return rtstatus; +} + +bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 i; + u32 arraylength; + u32 *ptrarray; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n")); + arraylength = rtlphy->hwparam_tables[MAC_REG].length ; + ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Img:RTL8192CEMAC_2T_ARRAY\n")); + for (i = 0; i < arraylength; i = i + 2) + rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]); + return true; +} + +bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype) +{ + int i; + u32 *phy_regarray_table; + u32 *agctab_array_table; + u16 phy_reg_arraylen, agctab_arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (IS_92C_SERIAL(rtlhal->version)) { + agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_2T].length; + agctab_array_table = rtlphy->hwparam_tables[AGCTAB_2T].pdata; + phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_2T].length; + phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_2T].pdata; + } else { + agctab_arraylen = rtlphy->hwparam_tables[AGCTAB_1T].length; + agctab_array_table = rtlphy->hwparam_tables[AGCTAB_1T].pdata; + phy_reg_arraylen = rtlphy->hwparam_tables[PHY_REG_1T].length; + phy_regarray_table = rtlphy->hwparam_tables[PHY_REG_1T].pdata; + } + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_reg_arraylen; i = i + 2) { + if (phy_regarray_table[i] == 0xfe) + mdelay(50); + else if (phy_regarray_table[i] == 0xfd) + mdelay(5); + else if (phy_regarray_table[i] == 0xfc) + mdelay(1); + else if (phy_regarray_table[i] == 0xfb) + udelay(50); + else if (phy_regarray_table[i] == 0xfa) + udelay(5); + else if (phy_regarray_table[i] == 0xf9) + udelay(1); + rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD, + phy_regarray_table[i + 1]); + udelay(1); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("The phy_regarray_table[0] is %x" + " Rtl819XPHY_REGArray[1] is %x\n", + phy_regarray_table[i], + phy_regarray_table[i + 1])); + } + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + for (i = 0; i < agctab_arraylen; i = i + 2) { + rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD, + agctab_array_table[i + 1]); + udelay(1); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("The agctab_array_table[0] is " + "%x Rtl819XPHY_REGArray[1] is %x\n", + agctab_array_table[i], + agctab_array_table[i + 1])); + } + } + return true; +} + +bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + int i; + u32 *phy_regarray_table_pg; + u16 phy_regarray_pg_len; + + rtlphy->pwrgroup_cnt = 0; + phy_regarray_pg_len = rtlphy->hwparam_tables[PHY_REG_PG].length; + phy_regarray_table_pg = rtlphy->hwparam_tables[PHY_REG_PG].pdata; + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_regarray_pg_len; i = i + 3) { + if (phy_regarray_table_pg[i] == 0xfe) + mdelay(50); + else if (phy_regarray_table_pg[i] == 0xfd) + mdelay(5); + else if (phy_regarray_table_pg[i] == 0xfc) + mdelay(1); + else if 
(phy_regarray_table_pg[i] == 0xfb) + udelay(50); + else if (phy_regarray_table_pg[i] == 0xfa) + udelay(5); + else if (phy_regarray_table_pg[i] == 0xf9) + udelay(1); + _rtl92c_store_pwrIndex_diffrate_offset(hw, + phy_regarray_table_pg[i], + phy_regarray_table_pg[i + 1], + phy_regarray_table_pg[i + 2]); + } + } else { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + ("configtype != BaseBand_Config_PHY_REG\n")); + } + return true; +} + +bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath) +{ + int i; + u32 *radioa_array_table; + u32 *radiob_array_table; + u16 radioa_arraylen, radiob_arraylen; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + if (IS_92C_SERIAL(rtlhal->version)) { + radioa_arraylen = rtlphy->hwparam_tables[RADIOA_2T].length; + radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata; + radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length; + radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Radio_A:RTL8192CERADIOA_2TARRAY\n")); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n")); + } else { + radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length; + radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata; + radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length; + radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n")); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n")); + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath)); + switch (rfpath) { + case RF90_PATH_A: + for (i = 0; i < radioa_arraylen; i = i + 2) { + if (radioa_array_table[i] == 0xfe) + mdelay(50); + else if (radioa_array_table[i] == 0xfd) + mdelay(5); + else if (radioa_array_table[i] == 0xfc) + mdelay(1); + else if (radioa_array_table[i] == 0xfb) + udelay(50); + else if (radioa_array_table[i] == 0xfa) + udelay(5); + else if (radioa_array_table[i] == 0xf9) + udelay(1); + else { + rtl_set_rfreg(hw, rfpath, radioa_array_table[i], + RFREG_OFFSET_MASK, + radioa_array_table[i + 1]); + udelay(1); + } + } + break; + case RF90_PATH_B: + for (i = 0; i < radiob_arraylen; i = i + 2) { + if (radiob_array_table[i] == 0xfe) { + mdelay(50); + } else if (radiob_array_table[i] == 0xfd) + mdelay(5); + else if (radiob_array_table[i] == 0xfc) + mdelay(1); + else if (radiob_array_table[i] == 0xfb) + udelay(50); + else if (radiob_array_table[i] == 0xfa) + udelay(5); + else if (radiob_array_table[i] == 0xf9) + udelay(1); + else { + rtl_set_rfreg(hw, rfpath, radiob_array_table[i], + RFREG_OFFSET_MASK, + radiob_array_table[i + 1]); + udelay(1); + } + } + break; + case RF90_PATH_C: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + case RF90_PATH_D: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + break; + } + return true; +} + +void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 reg_bw_opmode; + u8 reg_prsr_rsc; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + ("Switch to %s bandwidth\n", + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? 
+ "20MHz" : "40MHz")) + if (is_hal_stop(rtlhal)) { + rtlphy->set_bwmode_inprogress = false; + return; + } + reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE); + reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2); + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + reg_bw_opmode |= BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + break; + case HT_CHANNEL_WIDTH_20_40: + reg_bw_opmode &= ~BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + reg_prsr_rsc = + (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5); + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw)); + break; + } + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0); + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND, + (mac->cur_40_prime_sc >> 1)); + rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc); + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0); + rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)), + (mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw)); + break; + } + rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); + rtlphy->set_bwmode_inprogress = false; + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); +} + +void rtl92cu_bb_block_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + mutex_lock(&rtlpriv->io.bb_mutex); + rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1); + rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1); + mutex_unlock(&rtlpriv->io.bb_mutex); +} + +void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) +{ + u8 tmpreg; + u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + tmpreg = rtl_read_byte(rtlpriv, 0xd03); + + if ((tmpreg & 0x70) != 0) + rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F); + else + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + + if ((tmpreg & 0x70) != 0) { + rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS); + if (is2t) + rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00, + MASK12BITS); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, + (rf_a_mode & 0x8FFFF) | 0x10000); + if (is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, + (rf_b_mode & 0x8FFFF) | 0x10000); + } + lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS); + rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000); + mdelay(100); + if ((tmpreg & 0x70) != 0) { + rtl_write_byte(rtlpriv, 0xd03, tmpreg); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode); + if (is2t) + rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS, + rf_b_mode); + } else { + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); + } +} + +bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool bresult = true; + u8 i, queue_id; + struct rtl8192_tx_ring *ring = NULL; + + ppsc->set_rfpowerstate_inprogress 
= true; + switch (rfpwr_state) { + case ERFON: + if ((ppsc->rfpwr_state == ERFOFF) && + RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { + bool rtstatus; + u32 InitializeCount = 0; + + do { + InitializeCount++; + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + ("IPS Set eRf nic enable\n")); + rtstatus = rtl_ps_enable_nic(hw); + } while ((rtstatus != true) + && (InitializeCount < 10)); + RT_CLEAR_PS_LEVEL(ppsc, + RT_RF_OFF_LEVL_HALT_NIC); + } else { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + ("Set ERFON sleeped:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc-> + last_sleep_jiffies))); + ppsc->last_awake_jiffies = jiffies; + rtl92ce_phy_set_rf_on(hw); + } + if (mac->link_state == MAC80211_LINKED) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } + break; + case ERFOFF: + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (skb_queue_len(&ring->queue) == 0 || + queue_id == BEACON_QUEUE) { + queue_id++; + continue; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + ("eRf Off/Sleep: %d times " + "TcbBusyQueue[%d] " + "=%d before doze!\n", (i + 1), + queue_id, + skb_queue_len(&ring->queue))); + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + ("\nERFOFF: %d times " + "TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue))); + break; + } + } + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) { + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + ("IPS Set eRf nic disable\n")); + rtl_ps_disable_nic(hw); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + } else { + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_NO_LINK); + } else { + rtlpriv->cfg->ops->led_control(hw, + LED_CTL_POWER_OFF); + } + } + break; + case ERFSLEEP: + if (ppsc->rfpwr_state == ERFOFF) + break; + for (queue_id = 0, i = 0; + queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) { + ring = &pcipriv->dev.tx_ring[queue_id]; + if (skb_queue_len(&ring->queue) == 0) { + queue_id++; + continue; + } else { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + ("eRf Off/Sleep: %d times " + "TcbBusyQueue[%d] =%d before " + "doze!\n", (i + 1), queue_id, + skb_queue_len(&ring->queue))); + udelay(10); + i++; + } + if (i >= MAX_DOZE_WAITING_TIMES_9x) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + ("\n ERFSLEEP: %d times " + "TcbBusyQueue[%d] = %d !\n", + MAX_DOZE_WAITING_TIMES_9x, + queue_id, + skb_queue_len(&ring->queue))); + break; + } + } + RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG, + ("Set ERFSLEEP awaked:%d ms\n", + jiffies_to_msecs(jiffies - + ppsc->last_awake_jiffies))); + ppsc->last_sleep_jiffies = jiffies; + _rtl92c_phy_set_rf_sleep(hw); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("switch case not process\n")); + bresult = false; + break; + } + if (bresult) + ppsc->rfpwr_state = rfpwr_state; + ppsc->set_rfpowerstate_inprogress = false; + return bresult; +} + +bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool bresult = false; + + if (rfpwr_state == ppsc->rfpwr_state) + return bresult; + bresult = _rtl92cu_phy_set_rf_power_state(hw, rfpwr_state); + return bresult; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h new file mode 100644 index 000000000000..06299559ab68 --- /dev/null +++ 
b/drivers/net/wireless/rtlwifi/rtl8192cu/phy.h @@ -0,0 +1,36 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../rtl8192ce/phy.h" + +void rtl92cu_bb_block_on(struct ieee80211_hw *hw); +bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath); +void rtl92c_phy_set_io(struct ieee80211_hw *hw); +bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); +bool rtl92cu_phy_bb_config(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h new file mode 100644 index 000000000000..7f1be614c998 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/reg.h @@ -0,0 +1,30 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../rtl8192ce/reg.h" diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c new file mode 100644 index 000000000000..1c79c226f145 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c @@ -0,0 +1,493 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../wifi.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" + +static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw); + +void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + + switch (bandwidth) { + case HT_CHANNEL_WIDTH_20: + rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & + 0xfffff3ff) | 0x0400); + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + break; + case HT_CHANNEL_WIDTH_20_40: + rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & + 0xfffff3ff)); + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[0]); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("unknown bandwidth: %#X\n", bandwidth)); + break; + } +} + +void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 tx_agc[2] = { 0, 0 }, tmpval = 0; + bool turbo_scanoff = false; + u8 idx1, idx2; + u8 *ptr; + + if (rtlhal->interface == INTF_PCI) { + if (rtlefuse->eeprom_regulatory != 0) + turbo_scanoff = true; + } else { + if ((rtlefuse->eeprom_regulatory != 0) || + (rtlefuse->external_pa)) + turbo_scanoff = true; + } + if (mac->act_scanning == true) { + tx_agc[RF90_PATH_A] = 0x3f3f3f3f; + tx_agc[RF90_PATH_B] = 0x3f3f3f3f; + if (turbo_scanoff) { + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] << 24); + if (rtlhal->interface == INTF_USB) { + if (tx_agc[idx1] > 0x20 && + rtlefuse->external_pa) + tx_agc[idx1] = 0x20; + } + } + } + } else { + if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_LEVEL1) { + tx_agc[RF90_PATH_A] = 0x10101010; + tx_agc[RF90_PATH_B] = 0x10101010; + } else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_LEVEL1) { + tx_agc[RF90_PATH_A] = 0x00000000; + tx_agc[RF90_PATH_B] = 0x00000000; + } else{ + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + tx_agc[idx1] = ppowerlevel[idx1] | + (ppowerlevel[idx1] << 8) | + (ppowerlevel[idx1] << 16) | + (ppowerlevel[idx1] 
<< 24); + } + if (rtlefuse->eeprom_regulatory == 0) { + tmpval = (rtlphy->mcs_txpwrlevel_origoffset + [0][6]) + + (rtlphy->mcs_txpwrlevel_origoffset + [0][7] << 8); + tx_agc[RF90_PATH_A] += tmpval; + tmpval = (rtlphy->mcs_txpwrlevel_origoffset + [0][14]) + + (rtlphy->mcs_txpwrlevel_origoffset + [0][15] << 24); + tx_agc[RF90_PATH_B] += tmpval; + } + } + } + for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { + ptr = (u8 *) (&(tx_agc[idx1])); + for (idx2 = 0; idx2 < 4; idx2++) { + if (*ptr > RF6052_MAX_TX_PWR) + *ptr = RF6052_MAX_TX_PWR; + ptr++; + } + } + tmpval = tx_agc[RF90_PATH_A] & 0xff; + rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval); + + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_A_CCK1_MCS32)); + + tmpval = tx_agc[RF90_PATH_A] >> 8; + if (mac->mode == WIRELESS_MODE_B) + tmpval = tmpval & 0xff00ffff; + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_B_CCK11_A_CCK2_11)); + tmpval = tx_agc[RF90_PATH_B] >> 24; + rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_B_CCK11_A_CCK2_11)); + tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; + rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, + RTXAGC_B_CCK1_55_MCS32)); +} + +static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel, + u32 *ofdmbase, u32 *mcsbase) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u32 powerBase0, powerBase1; + u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0; + u8 i, powerlevel[2]; + + for (i = 0; i < 2; i++) { + powerlevel[i] = ppowerlevel[i]; + legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; + powerBase0 = powerlevel[i] + legacy_pwrdiff; + powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | + (powerBase0 << 8) | powerBase0; + *(ofdmbase + i) = powerBase0; + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + (" [OFDM power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(ofdmbase + i))); + } + for (i = 0; i < 2; i++) { + if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { + ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; + powerlevel[i] += ht20_pwrdiff; + } + powerBase1 = powerlevel[i]; + powerBase1 = (powerBase1 << 24) | + (powerBase1 << 16) | (powerBase1 << 8) | powerBase1; + *(mcsbase + i) = powerBase1; + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + (" [MCS power base index rf(%c) = 0x%x]\n", + ((i == 0) ? 'A' : 'B'), *(mcsbase + i))); + } +} + +static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, + u8 channel, u8 index, + u32 *powerBase0, + u32 *powerBase1, + u32 *p_outwriteval) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + u8 i, chnlgroup = 0, pwr_diff_limit[4]; + u32 writeVal, customer_limit, rf; + + for (rf = 0; rf < 2; rf++) { + switch (rtlefuse->eeprom_regulatory) { + case 0: + chnlgroup = 0; + writeVal = rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + (rf ? 8 : 0)] + + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("RTK better performance,writeVal(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeVal)); + break; + case 1: + if (rtlphy->pwrgroup_cnt == 1) + chnlgroup = 0; + if (rtlphy->pwrgroup_cnt >= 3) { + if (channel <= 3) + chnlgroup = 0; + else if (channel >= 4 && channel <= 9) + chnlgroup = 1; + else if (channel > 9) + chnlgroup = 2; + if (rtlphy->current_chan_bw == + HT_CHANNEL_WIDTH_20) + chnlgroup++; + else + chnlgroup += 4; + } + writeVal = rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + + (rf ? 8 : 0)] + + ((index < 2) ? powerBase0[rf] : + powerBase1[rf]); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Realtek regulatory, 20MHz, " + "writeVal(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeVal)); + break; + case 2: + writeVal = ((index < 2) ? powerBase0[rf] : + powerBase1[rf]); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Better regulatory,writeVal(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeVal)); + break; + case 3: + chnlgroup = 0; + if (rtlphy->current_chan_bw == + HT_CHANNEL_WIDTH_20_40) { + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("customer's limit, 40MHzrf(%c) = " + "0x%x\n", ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht40[rf] + [channel - 1])); + } else { + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("customer's limit, 20MHz rf(%c) = " + "0x%x\n", ((rf == 0) ? 'A' : 'B'), + rtlefuse->pwrgroup_ht20[rf] + [channel - 1])); + } + for (i = 0; i < 4; i++) { + pwr_diff_limit[i] = + (u8) ((rtlphy->mcs_txpwrlevel_origoffset + [chnlgroup][index + (rf ? 8 : 0)] + & (0x7f << (i * 8))) >> (i * 8)); + if (rtlphy->current_chan_bw == + HT_CHANNEL_WIDTH_20_40) { + if (pwr_diff_limit[i] > + rtlefuse->pwrgroup_ht40[rf] + [channel - 1]) + pwr_diff_limit[i] = rtlefuse-> + pwrgroup_ht40[rf] + [channel - 1]; + } else { + if (pwr_diff_limit[i] > + rtlefuse->pwrgroup_ht20[rf] + [channel - 1]) + pwr_diff_limit[i] = + rtlefuse->pwrgroup_ht20[rf] + [channel - 1]; + } + } + customer_limit = (pwr_diff_limit[3] << 24) | + (pwr_diff_limit[2] << 16) | + (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Customer's limit rf(%c) = 0x%x\n", + ((rf == 0) ? 'A' : 'B'), customer_limit)); + writeVal = customer_limit + ((index < 2) ? + powerBase0[rf] : powerBase1[rf]); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Customer, writeVal rf(%c)= 0x%x\n", + ((rf == 0) ? 'A' : 'B'), writeVal)); + break; + default: + chnlgroup = 0; + writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] + [index + (rf ? 8 : 0)] + ((index < 2) ? + powerBase0[rf] : powerBase1[rf]); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, ("RTK better " + "performance, writeValrf(%c) = 0x%x\n", + ((rf == 0) ? 
'A' : 'B'), writeVal)); + break; + } + if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_LEVEL1) + writeVal = 0x14141414; + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_LEVEL2) + writeVal = 0x00000000; + if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) + writeVal = writeVal - 0x06060606; + else if (rtlpriv->dm.dynamic_txhighpower_lvl == + TXHIGHPWRLEVEL_BT2) + writeVal = writeVal; + *(p_outwriteval + rf) = writeVal; + } +} + +static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, + u8 index, u32 *pValue) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u16 regoffset_a[6] = { + RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, + RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, + RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 + }; + u16 regoffset_b[6] = { + RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24, + RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, + RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 + }; + u8 i, rf, pwr_val[4]; + u32 writeVal; + u16 regoffset; + + for (rf = 0; rf < 2; rf++) { + writeVal = pValue[rf]; + for (i = 0; i < 4; i++) { + pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >> + (i * 8)); + if (pwr_val[i] > RF6052_MAX_TX_PWR) + pwr_val[i] = RF6052_MAX_TX_PWR; + } + writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | + (pwr_val[1] << 8) | pwr_val[0]; + if (rf == 0) + regoffset = regoffset_a[index]; + else + regoffset = regoffset_b[index]; + rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal); + RTPRINT(rtlpriv, FPHY, PHY_TXPWR, + ("Set 0x%x = %08x\n", regoffset, writeVal)); + if (((get_rf_type(rtlphy) == RF_2T2R) && + (regoffset == RTXAGC_A_MCS15_MCS12 || + regoffset == RTXAGC_B_MCS15_MCS12)) || + ((get_rf_type(rtlphy) != RF_2T2R) && + (regoffset == RTXAGC_A_MCS07_MCS04 || + regoffset == RTXAGC_B_MCS07_MCS04))) { + writeVal = pwr_val[3]; + if (regoffset == RTXAGC_A_MCS15_MCS12 || + regoffset == RTXAGC_A_MCS07_MCS04) + regoffset = 0xc90; + if (regoffset == RTXAGC_B_MCS15_MCS12 || + regoffset == RTXAGC_B_MCS07_MCS04) + regoffset = 0xc98; + for (i = 0; i < 3; i++) { + writeVal = (writeVal > 6) ? 
(writeVal - 6) : 0; + rtl_write_byte(rtlpriv, (u32)(regoffset + i), + (u8)writeVal); + } + } + } +} + +void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel) +{ + u32 writeVal[2], powerBase0[2], powerBase1[2]; + u8 index = 0; + + rtl92c_phy_get_power_base(hw, ppowerlevel, + channel, &powerBase0[0], &powerBase1[0]); + for (index = 0; index < 6; index++) { + _rtl92c_get_txpower_writeval_by_regulatory(hw, + channel, index, + &powerBase0[0], + &powerBase1[0], + &writeVal[0]); + _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]); + } +} + +bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + bool rtstatus = true; + u8 b_reg_hwparafile = 1; + + if (rtlphy->rf_type == RF_1T1R) + rtlphy->num_total_rfpath = 1; + else + rtlphy->num_total_rfpath = 2; + if (b_reg_hwparafile == 1) + rtstatus = _rtl92c_phy_rf6052_config_parafile(hw); + return rtstatus; +} + +static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + u32 u4_regvalue = 0; + u8 rfpath; + bool rtstatus = true; + struct bb_reg_def *pphyreg; + + for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { + pphyreg = &rtlphy->phyreg_def[rfpath]; + switch (rfpath) { + case RF90_PATH_A: + case RF90_PATH_C: + u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV); + break; + case RF90_PATH_B: + case RF90_PATH_D: + u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV << 16); + break; + } + rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); + udelay(1); + rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1); + udelay(1); + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, + B3WIREADDREAALENGTH, 0x0); + udelay(1); + rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); + udelay(1); + switch (rfpath) { + case RF90_PATH_A: + rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw, + (enum radio_path) rfpath); + break; + case RF90_PATH_B: + rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw, + (enum radio_path) rfpath); + break; + case RF90_PATH_C: + break; + case RF90_PATH_D: + break; + } + switch (rfpath) { + case RF90_PATH_A: + case RF90_PATH_C: + rtl_set_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV, u4_regvalue); + break; + case RF90_PATH_B: + case RF90_PATH_D: + rtl_set_bbreg(hw, pphyreg->rfintfs, + BRFSI_RFENV << 16, u4_regvalue); + break; + } + if (rtstatus != true) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + ("Radio[%d] Fail!!", rfpath)); + goto phy_rf_cfg_fail; + } + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n")); + return rtstatus; +phy_rf_cfg_fail: + return rtstatus; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h new file mode 100644 index 000000000000..86c2728cfa00 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.h @@ -0,0 +1,47 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#ifndef __RTL92CU_RF_H__ +#define __RTL92CU_RF_H__ + +#define RF6052_MAX_TX_PWR 0x3F +#define RF6052_MAX_REG 0x3F +#define RF6052_MAX_PATH 2 + +extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, + u8 bandwidth); +extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel); +extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel); +bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw); +bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum radio_path rfpath); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c new file mode 100644 index 000000000000..71244a38d49e --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -0,0 +1,336 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../core.h" +#include "../usb.h" +#include "../efuse.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "mac.h" +#include "dm.h" +#include "rf.h" +#include "sw.h" +#include "trx.h" +#include "led.h" +#include "hw.h" +#include <linux/vmalloc.h> + +MODULE_AUTHOR("Georgia <georgia@realtek.com>"); +MODULE_AUTHOR("Ziv Huang <ziv_huang@realtek.com>"); +MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless"); +MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin"); + +static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.dm_initialgain_enable = 1; + rtlpriv->dm.dm_flag = 0; + rtlpriv->dm.disable_framebursting = 0; + rtlpriv->dm.thermalvalue = 0; + rtlpriv->rtlhal.pfirmware = vmalloc(0x4000); + if (!rtlpriv->rtlhal.pfirmware) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Can't alloc buffer for fw.\n")); + return 1; + } + return 0; +} + +static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (rtlpriv->rtlhal.pfirmware) { + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + } +} + +static struct rtl_hal_ops rtl8192cu_hal_ops = { + .init_sw_vars = rtl92cu_init_sw_vars, + .deinit_sw_vars = rtl92cu_deinit_sw_vars, + .read_chip_version = rtl92c_read_chip_version, + .read_eeprom_info = rtl92cu_read_eeprom_info, + .enable_interrupt = rtl92c_enable_interrupt, + .disable_interrupt = rtl92c_disable_interrupt, + .hw_init = rtl92cu_hw_init, + .hw_disable = rtl92cu_card_disable, + .set_network_type = rtl92cu_set_network_type, + .set_chk_bssid = rtl92cu_set_check_bssid, + .set_qos = rtl92c_set_qos, + .set_bcn_reg = rtl92cu_set_beacon_related_registers, + .set_bcn_intv = rtl92cu_set_beacon_interval, + .update_interrupt_mask = rtl92cu_update_interrupt_mask, + .get_hw_reg = rtl92cu_get_hw_reg, + .set_hw_reg = rtl92cu_set_hw_reg, + .update_rate_table = rtl92cu_update_hal_rate_table, + .update_rate_mask = rtl92cu_update_hal_rate_mask, + .fill_tx_desc = rtl92cu_tx_fill_desc, + .fill_fake_txdesc = rtl92cu_fill_fake_txdesc, + .fill_tx_cmddesc = rtl92cu_tx_fill_cmddesc, + .cmd_send_packet = rtl92cu_cmd_send_packet, + .query_rx_desc = rtl92cu_rx_query_desc, + .set_channel_access = rtl92cu_update_channel_access_setting, + .radio_onoff_checking = rtl92cu_gpio_radio_on_off_checking, + .set_bw_mode = rtl92c_phy_set_bw_mode, + .switch_channel = rtl92c_phy_sw_chnl, + .dm_watchdog = rtl92c_dm_watchdog, + .scan_operation_backup = rtl92c_phy_scan_operation_backup, + .set_rf_power_state = rtl92cu_phy_set_rf_power_state, + .led_control = rtl92cu_led_control, + .enable_hw_sec = rtl92cu_enable_hw_security_config, + .set_key = rtl92c_set_key, + .init_sw_leds = rtl92cu_init_sw_leds, + .deinit_sw_leds = rtl92cu_deinit_sw_leds, + .get_bbreg = rtl92c_phy_query_bb_reg, + .set_bbreg = rtl92c_phy_set_bb_reg, + .get_rfreg = rtl92cu_phy_query_rf_reg, + .set_rfreg = rtl92cu_phy_set_rf_reg, + .phy_rf6052_config = rtl92cu_phy_rf6052_config, + .phy_rf6052_set_cck_txpower = rtl92cu_phy_rf6052_set_cck_txpower, + .phy_rf6052_set_ofdm_txpower = rtl92cu_phy_rf6052_set_ofdm_txpower, + .config_bb_with_headerfile = _rtl92cu_phy_config_bb_with_headerfile, + .config_bb_with_pgheaderfile = _rtl92cu_phy_config_bb_with_pgheaderfile, + .phy_lc_calibrate = 
_rtl92cu_phy_lc_calibrate, + .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, + .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, +}; + +static struct rtl_mod_params rtl92cu_mod_params = { + .sw_crypto = 0, +}; + +static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { + /* rx */ + .in_ep_num = RTL92C_USB_BULK_IN_NUM, + .rx_urb_num = RTL92C_NUM_RX_URBS, + .rx_max_size = RTL92C_SIZE_MAX_RX_BUFFER, + .usb_rx_hdl = rtl8192cu_rx_hdl, + .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */ + /* tx */ + .usb_tx_cleanup = rtl8192c_tx_cleanup, + .usb_tx_post_hdl = rtl8192c_tx_post_hdl, + .usb_tx_aggregate_hdl = rtl8192c_tx_aggregate_hdl, + /* endpoint mapping */ + .usb_endpoint_mapping = rtl8192cu_endpoint_mapping, + .usb_mq_to_hwq = rtl8192cu_mq_to_hwq, +}; + +static struct rtl_hal_cfg rtl92cu_hal_cfg = { + .name = "rtl92c_usb", + .fw_name = "rtlwifi/rtl8192cufw.bin", + .ops = &rtl8192cu_hal_ops, + .mod_params = &rtl92cu_mod_params, + .usb_interface_cfg = &rtl92cu_interface_cfg, + + .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, + .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, + .maps[SYS_CLK] = REG_SYS_CLKR, + .maps[MAC_RCR_AM] = AM, + .maps[MAC_RCR_AB] = AB, + .maps[MAC_RCR_ACRC32] = ACRC32, + .maps[MAC_RCR_ACF] = ACF, + .maps[MAC_RCR_AAP] = AAP, + + .maps[EFUSE_TEST] = REG_EFUSE_TEST, + .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_CLK] = 0, + .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_PWC_EV12V] = PWC_EV12V, + .maps[EFUSE_FEN_ELDR] = FEN_ELDR, + .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN, + .maps[EFUSE_ANA8M] = EFUSE_ANA8M, + .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, + .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, + .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, + + .maps[RWCAM] = REG_CAMCMD, + .maps[WCAMI] = REG_CAMWRITE, + .maps[RCAMO] = REG_CAMREAD, + .maps[CAMDBG] = REG_CAMDBG, + .maps[SECR] = REG_SECCFG, + .maps[SEC_CAM_NONE] = CAM_NONE, + .maps[SEC_CAM_WEP40] = CAM_WEP40, + .maps[SEC_CAM_TKIP] = CAM_TKIP, + .maps[SEC_CAM_AES] = CAM_AES, + .maps[SEC_CAM_WEP104] = CAM_WEP104, + + .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, + .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, + .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, + .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, + .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, + .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, + .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, + .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, + .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, + .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, + .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, + .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, + .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, + .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, + .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2, + .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1, + + .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, + .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, + .maps[RTL_IMR_BcnInt] = IMR_BCNINT, + .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, + .maps[RTL_IMR_RDU] = IMR_RDU, + .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, + .maps[RTL_IMR_BDOK] = IMR_BDOK, + .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, + .maps[RTL_IMR_TBDER] = IMR_TBDER, + .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, + .maps[RTL_IMR_TBDOK] = IMR_TBDOK, + .maps[RTL_IMR_BKDOK] = IMR_BKDOK, + .maps[RTL_IMR_BEDOK] = IMR_BEDOK, + .maps[RTL_IMR_VIDOK] = IMR_VIDOK, + .maps[RTL_IMR_VODOK] = IMR_VODOK, + .maps[RTL_IMR_ROK] = IMR_ROK, + .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER), + + .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M, + .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M, + .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M, + 
.maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M, + .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M, + .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M, + .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M, + .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M, + .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M, + .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M, + .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M, + .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M, + .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7, + .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, +}; + +#define USB_VENDER_ID_REALTEK 0x0bda + +/* 2010-10-19 DID_USB_V3.4 */ +static struct usb_device_id rtl8192c_usb_ids[] = { + + /*=== Realtek demoboard ===*/ + /* Default ID */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)}, + + /****** 8188CU ********/ + /* 8188CE-VAU USB minCard */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)}, + /* 8188cu 1*1 dongle */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8176, rtl92cu_hal_cfg)}, + /* 8188cu 1*1 dongle, (b/g mode only) */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)}, + /* 8188cu Slim Solo */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817a, rtl92cu_hal_cfg)}, + /* 8188cu Slim Combo */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)}, + /* 8188RU High-power USB Dongle */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817d, rtl92cu_hal_cfg)}, + /* 8188CE-VAU USB minCard (b/g mode only) */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)}, + /* 8188 Combo for BC4 */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)}, + + /****** 8192CU ********/ + /* 8191cu 1*2 */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)}, + /* 8192cu 2*2 */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)}, + /* 8192CE-VAU USB minCard */ + {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)}, + + /*=== Customer ID ===*/ + /****** 8188CU ********/ + {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ + {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ + {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ + {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ + {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ + {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ + /* HP - Lite-On ,8188CUS Slim Combo */ + {RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)}, + {RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/ + {RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/ + {RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/ + {RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/ + {RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/ + {RTL_USB_DEVICE(0x3358, 0x13d3, rtl92cu_hal_cfg)}, /*Azwave 8188CE-VAU*/ + /* Russian customer -Azwave (8188CE-VAU b/g mode only) */ + {RTL_USB_DEVICE(0x3359, 0x13d3, rtl92cu_hal_cfg)}, + + /****** 8192CU ********/ + {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ + {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ + {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ + {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Abocom -Abocom*/ + {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ + {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ + {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, 
/*D-Link-Alpha*/ + {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ + {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/ + {} +}; + +MODULE_DEVICE_TABLE(usb, rtl8192c_usb_ids); + +static struct usb_driver rtl8192cu_driver = { + .name = "rtl8192cu", + .probe = rtl_usb_probe, + .disconnect = rtl_usb_disconnect, + .id_table = rtl8192c_usb_ids, + +#ifdef CONFIG_PM + /* .suspend = rtl_usb_suspend, */ + /* .resume = rtl_usb_resume, */ + /* .reset_resume = rtl8192c_resume, */ +#endif /* CONFIG_PM */ +#ifdef CONFIG_AUTOSUSPEND + .supports_autosuspend = 1, +#endif +}; + +static int __init rtl8192cu_init(void) +{ + return usb_register(&rtl8192cu_driver); +} + +static void __exit rtl8192cu_exit(void) +{ + usb_deregister(&rtl8192cu_driver); +} + +module_init(rtl8192cu_init); +module_exit(rtl8192cu_exit); diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h new file mode 100644 index 000000000000..43b1177924ab --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h @@ -0,0 +1,53 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#ifndef __RTL92CU_SW_H__ +#define __RTL92CU_SW_H__ + +#define EFUSE_MAX_SECTION 16 + +void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, + u8 *powerlevel); +void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel); +bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, + u8 configtype); +bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, + u8 configtype); +void _rtl92cu_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t); +void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, + u32 regaddr, u32 bitmask, u32 data); +bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); +u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, + enum radio_path rfpath, u32 regaddr, u32 bitmask); +void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.c b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c new file mode 100644 index 000000000000..d57ef5e88a9e --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.c @@ -0,0 +1,1888 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "table.h" + +u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH] = { + 0x024, 0x0011800f, + 0x028, 0x00ffdb83, + 0x800, 0x80040002, + 0x804, 0x00000003, + 0x808, 0x0000fc00, + 0x80c, 0x0000000a, + 0x810, 0x10005388, + 0x814, 0x020c3d10, + 0x818, 0x02200385, + 0x81c, 0x00000000, + 0x820, 0x01000100, + 0x824, 0x00390004, + 0x828, 0x01000100, + 0x82c, 0x00390004, + 0x830, 0x27272727, + 0x834, 0x27272727, + 0x838, 0x27272727, + 0x83c, 0x27272727, + 0x840, 0x00010000, + 0x844, 0x00010000, + 0x848, 0x27272727, + 0x84c, 0x27272727, + 0x850, 0x00000000, + 0x854, 0x00000000, + 0x858, 0x569a569a, + 0x85c, 0x0c1b25a4, + 0x860, 0x66e60230, + 0x864, 0x061f0130, + 0x868, 0x27272727, + 0x86c, 0x2b2b2b27, + 0x870, 0x07000700, + 0x874, 0x22184000, + 0x878, 0x08080808, + 0x87c, 0x00000000, + 0x880, 0xc0083070, + 0x884, 0x000004d5, + 0x888, 0x00000000, + 0x88c, 0xcc0000c0, + 0x890, 0x00000800, + 0x894, 0xfffffffe, + 0x898, 0x40302010, + 0x89c, 0x00706050, + 0x900, 0x00000000, + 0x904, 0x00000023, + 0x908, 0x00000000, + 0x90c, 0x81121313, + 0xa00, 0x00d047c8, + 0xa04, 0x80ff000c, + 0xa08, 0x8c838300, + 0xa0c, 0x2e68120f, + 0xa10, 0x9500bb78, + 0xa14, 0x11144028, + 0xa18, 0x00881117, + 0xa1c, 0x89140f00, + 0xa20, 0x1a1b0000, + 0xa24, 0x090e1317, + 0xa28, 0x00000204, + 0xa2c, 0x00d30000, + 0xa70, 0x101fbf00, + 0xa74, 0x00000007, + 0xc00, 0x48071d40, + 0xc04, 0x03a05633, + 0xc08, 0x000000e4, + 0xc0c, 0x6c6c6c6c, + 0xc10, 0x08800000, + 0xc14, 0x40000100, + 0xc18, 0x08800000, + 0xc1c, 0x40000100, + 0xc20, 0x00000000, + 0xc24, 0x00000000, + 0xc28, 0x00000000, + 0xc2c, 0x00000000, + 0xc30, 0x69e9ac44, + 0xc34, 0x469652cf, + 0xc38, 0x49795994, + 0xc3c, 0x0a97971c, + 0xc40, 0x1f7c403f, + 0xc44, 0x000100b7, + 0xc48, 0xec020107, + 0xc4c, 0x007f037f, + 0xc50, 0x6954341e, + 0xc54, 0x43bc0094, + 0xc58, 0x6954341e, + 0xc5c, 0x433c0094, + 0xc60, 0x00000000, + 0xc64, 0x5116848b, + 0xc68, 0x47c00bff, + 0xc6c, 0x00000036, + 0xc70, 0x2c7f000d, + 0xc74, 0x0186115b, + 0xc78, 0x0000001f, + 0xc7c, 0x00b99612, + 0xc80, 0x40000100, + 0xc84, 0x20f60000, + 0xc88, 0x40000100, + 0xc8c, 0x20200000, + 0xc90, 0x00121820, + 0xc94, 0x00000000, + 0xc98, 0x00121820, + 0xc9c, 0x00007f7f, + 0xca0, 0x00000000, + 0xca4, 0x00000080, + 0xca8, 0x00000000, + 0xcac, 0x00000000, + 0xcb0, 0x00000000, + 0xcb4, 0x00000000, + 0xcb8, 0x00000000, + 0xcbc, 0x28000000, + 0xcc0, 0x00000000, + 0xcc4, 0x00000000, + 0xcc8, 0x00000000, + 0xccc, 0x00000000, + 0xcd0, 0x00000000, + 0xcd4, 0x00000000, + 0xcd8, 0x64b22427, + 0xcdc, 0x00766932, + 0xce0, 0x00222222, + 0xce4, 0x00000000, + 0xce8, 0x37644302, + 0xcec, 0x2f97d40c, + 0xd00, 0x00080740, + 0xd04, 0x00020403, + 0xd08, 0x0000907f, + 0xd0c, 0x20010201, + 0xd10, 0xa0633333, + 0xd14, 0x3333bc43, + 0xd18, 0x7a8f5b6b, + 0xd2c, 0xcc979975, + 0xd30, 0x00000000, + 0xd34, 0x80608000, + 0xd38, 0x00000000, + 0xd3c, 0x00027293, + 0xd40, 0x00000000, + 0xd44, 0x00000000, + 0xd48, 0x00000000, + 0xd4c, 0x00000000, + 0xd50, 0x6437140a, + 0xd54, 0x00000000, + 0xd58, 0x00000000, + 0xd5c, 0x30032064, + 0xd60, 0x4653de68, + 0xd64, 0x04518a3c, + 0xd68, 0x00002101, + 0xd6c, 0x2a201c16, + 0xd70, 0x1812362e, + 0xd74, 0x322c2220, + 0xd78, 0x000e3c24, + 0xe00, 0x2a2a2a2a, + 0xe04, 0x2a2a2a2a, + 0xe08, 0x03902a2a, + 0xe10, 0x2a2a2a2a, + 0xe14, 0x2a2a2a2a, + 0xe18, 0x2a2a2a2a, + 0xe1c, 0x2a2a2a2a, + 0xe28, 0x00000000, + 0xe30, 0x1000dc1f, + 0xe34, 0x10008c1f, + 0xe38, 0x02140102, + 0xe3c, 
0x681604c2, + 0xe40, 0x01007c00, + 0xe44, 0x01004800, + 0xe48, 0xfb000000, + 0xe4c, 0x000028d1, + 0xe50, 0x1000dc1f, + 0xe54, 0x10008c1f, + 0xe58, 0x02140102, + 0xe5c, 0x28160d05, + 0xe60, 0x00000010, + 0xe68, 0x001b25a4, + 0xe6c, 0x63db25a4, + 0xe70, 0x63db25a4, + 0xe74, 0x0c1b25a4, + 0xe78, 0x0c1b25a4, + 0xe7c, 0x0c1b25a4, + 0xe80, 0x0c1b25a4, + 0xe84, 0x63db25a4, + 0xe88, 0x0c1b25a4, + 0xe8c, 0x63db25a4, + 0xed0, 0x63db25a4, + 0xed4, 0x63db25a4, + 0xed8, 0x63db25a4, + 0xedc, 0x001b25a4, + 0xee0, 0x001b25a4, + 0xeec, 0x6fdb25a4, + 0xf14, 0x00000003, + 0xf4c, 0x00000000, + 0xf00, 0x00000300, +}; + +u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH] = { + 0x024, 0x0011800f, + 0x028, 0x00ffdb83, + 0x800, 0x80040000, + 0x804, 0x00000001, + 0x808, 0x0000fc00, + 0x80c, 0x0000000a, + 0x810, 0x10005388, + 0x814, 0x020c3d10, + 0x818, 0x02200385, + 0x81c, 0x00000000, + 0x820, 0x01000100, + 0x824, 0x00390004, + 0x828, 0x00000000, + 0x82c, 0x00000000, + 0x830, 0x00000000, + 0x834, 0x00000000, + 0x838, 0x00000000, + 0x83c, 0x00000000, + 0x840, 0x00010000, + 0x844, 0x00000000, + 0x848, 0x00000000, + 0x84c, 0x00000000, + 0x850, 0x00000000, + 0x854, 0x00000000, + 0x858, 0x569a569a, + 0x85c, 0x001b25a4, + 0x860, 0x66e60230, + 0x864, 0x061f0130, + 0x868, 0x00000000, + 0x86c, 0x32323200, + 0x870, 0x07000700, + 0x874, 0x22004000, + 0x878, 0x00000808, + 0x87c, 0x00000000, + 0x880, 0xc0083070, + 0x884, 0x000004d5, + 0x888, 0x00000000, + 0x88c, 0xccc000c0, + 0x890, 0x00000800, + 0x894, 0xfffffffe, + 0x898, 0x40302010, + 0x89c, 0x00706050, + 0x900, 0x00000000, + 0x904, 0x00000023, + 0x908, 0x00000000, + 0x90c, 0x81121111, + 0xa00, 0x00d047c8, + 0xa04, 0x80ff000c, + 0xa08, 0x8c838300, + 0xa0c, 0x2e68120f, + 0xa10, 0x9500bb78, + 0xa14, 0x11144028, + 0xa18, 0x00881117, + 0xa1c, 0x89140f00, + 0xa20, 0x1a1b0000, + 0xa24, 0x090e1317, + 0xa28, 0x00000204, + 0xa2c, 0x00d30000, + 0xa70, 0x101fbf00, + 0xa74, 0x00000007, + 0xc00, 0x48071d40, + 0xc04, 0x03a05611, + 0xc08, 0x000000e4, + 0xc0c, 0x6c6c6c6c, + 0xc10, 0x08800000, + 0xc14, 0x40000100, + 0xc18, 0x08800000, + 0xc1c, 0x40000100, + 0xc20, 0x00000000, + 0xc24, 0x00000000, + 0xc28, 0x00000000, + 0xc2c, 0x00000000, + 0xc30, 0x69e9ac44, + 0xc34, 0x469652cf, + 0xc38, 0x49795994, + 0xc3c, 0x0a97971c, + 0xc40, 0x1f7c403f, + 0xc44, 0x000100b7, + 0xc48, 0xec020107, + 0xc4c, 0x007f037f, + 0xc50, 0x6954341e, + 0xc54, 0x43bc0094, + 0xc58, 0x6954341e, + 0xc5c, 0x433c0094, + 0xc60, 0x00000000, + 0xc64, 0x5116848b, + 0xc68, 0x47c00bff, + 0xc6c, 0x00000036, + 0xc70, 0x2c7f000d, + 0xc74, 0x018610db, + 0xc78, 0x0000001f, + 0xc7c, 0x00b91612, + 0xc80, 0x40000100, + 0xc84, 0x20f60000, + 0xc88, 0x40000100, + 0xc8c, 0x20200000, + 0xc90, 0x00121820, + 0xc94, 0x00000000, + 0xc98, 0x00121820, + 0xc9c, 0x00007f7f, + 0xca0, 0x00000000, + 0xca4, 0x00000080, + 0xca8, 0x00000000, + 0xcac, 0x00000000, + 0xcb0, 0x00000000, + 0xcb4, 0x00000000, + 0xcb8, 0x00000000, + 0xcbc, 0x28000000, + 0xcc0, 0x00000000, + 0xcc4, 0x00000000, + 0xcc8, 0x00000000, + 0xccc, 0x00000000, + 0xcd0, 0x00000000, + 0xcd4, 0x00000000, + 0xcd8, 0x64b22427, + 0xcdc, 0x00766932, + 0xce0, 0x00222222, + 0xce4, 0x00000000, + 0xce8, 0x37644302, + 0xcec, 0x2f97d40c, + 0xd00, 0x00080740, + 0xd04, 0x00020401, + 0xd08, 0x0000907f, + 0xd0c, 0x20010201, + 0xd10, 0xa0633333, + 0xd14, 0x3333bc43, + 0xd18, 0x7a8f5b6b, + 0xd2c, 0xcc979975, + 0xd30, 0x00000000, + 0xd34, 0x80608000, + 0xd38, 0x00000000, + 0xd3c, 0x00027293, + 0xd40, 0x00000000, + 0xd44, 0x00000000, + 0xd48, 0x00000000, + 0xd4c, 0x00000000, + 0xd50, 0x6437140a, + 
0xd54, 0x00000000, + 0xd58, 0x00000000, + 0xd5c, 0x30032064, + 0xd60, 0x4653de68, + 0xd64, 0x04518a3c, + 0xd68, 0x00002101, + 0xd6c, 0x2a201c16, + 0xd70, 0x1812362e, + 0xd74, 0x322c2220, + 0xd78, 0x000e3c24, + 0xe00, 0x2a2a2a2a, + 0xe04, 0x2a2a2a2a, + 0xe08, 0x03902a2a, + 0xe10, 0x2a2a2a2a, + 0xe14, 0x2a2a2a2a, + 0xe18, 0x2a2a2a2a, + 0xe1c, 0x2a2a2a2a, + 0xe28, 0x00000000, + 0xe30, 0x1000dc1f, + 0xe34, 0x10008c1f, + 0xe38, 0x02140102, + 0xe3c, 0x681604c2, + 0xe40, 0x01007c00, + 0xe44, 0x01004800, + 0xe48, 0xfb000000, + 0xe4c, 0x000028d1, + 0xe50, 0x1000dc1f, + 0xe54, 0x10008c1f, + 0xe58, 0x02140102, + 0xe5c, 0x28160d05, + 0xe60, 0x00000008, + 0xe68, 0x001b25a4, + 0xe6c, 0x631b25a0, + 0xe70, 0x631b25a0, + 0xe74, 0x081b25a0, + 0xe78, 0x081b25a0, + 0xe7c, 0x081b25a0, + 0xe80, 0x081b25a0, + 0xe84, 0x631b25a0, + 0xe88, 0x081b25a0, + 0xe8c, 0x631b25a0, + 0xed0, 0x631b25a0, + 0xed4, 0x631b25a0, + 0xed8, 0x631b25a0, + 0xedc, 0x001b25a0, + 0xee0, 0x001b25a0, + 0xeec, 0x6b1b25a0, + 0xf14, 0x00000003, + 0xf4c, 0x00000000, + 0xf00, 0x00000300, +}; + +u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH] = { + 0xe00, 0xffffffff, 0x07090c0c, + 0xe04, 0xffffffff, 0x01020405, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x0b0c0c0e, + 0xe14, 0xffffffff, 0x01030506, + 0xe18, 0xffffffff, 0x0b0c0d0e, + 0xe1c, 0xffffffff, 0x01030509, + 0x830, 0xffffffff, 0x07090c0c, + 0x834, 0xffffffff, 0x01020405, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x0b0c0d0e, + 0x848, 0xffffffff, 0x01030509, + 0x84c, 0xffffffff, 0x0b0c0d0e, + 0x868, 0xffffffff, 0x01030509, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x06060606, + 0xe14, 0xffffffff, 0x00020406, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x06060606, + 0x848, 0xffffffff, 0x00020406, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 
0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, +}; + +u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH] = { + 0x000, 0x00030159, + 0x001, 0x00031284, + 0x002, 0x00098000, + 0x003, 0x00018c63, + 0x004, 0x000210e7, + 0x009, 0x0002044f, + 0x00a, 0x0001adb1, + 0x00b, 0x00054867, + 0x00c, 0x0008992e, + 0x00d, 0x0000e52c, + 0x00e, 0x00039ce7, + 0x00f, 0x00000451, + 0x019, 0x00000000, + 0x01a, 0x00010255, + 0x01b, 0x00060a00, + 0x01c, 0x000fc378, + 0x01d, 0x000a1250, + 0x01e, 0x0004445f, + 0x01f, 0x00080001, + 0x020, 0x0000b614, + 0x021, 0x0006c000, + 0x022, 0x00000000, + 0x023, 0x00001558, + 0x024, 0x00000060, + 0x025, 0x00000483, + 0x026, 0x0004f000, + 0x027, 0x000ec7d9, + 0x028, 0x000577c0, + 0x029, 0x00004783, + 0x02a, 0x00000001, + 0x02b, 0x00021334, + 0x02a, 0x00000000, + 0x02b, 0x00000054, + 0x02a, 0x00000001, + 0x02b, 0x00000808, + 0x02b, 0x00053333, + 0x02c, 0x0000000c, + 0x02a, 0x00000002, + 0x02b, 0x00000808, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000003, + 0x02b, 0x00000808, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x00000004, + 0x02b, 0x00000808, + 0x02b, 0x0006b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000005, + 0x02b, 0x00000808, + 0x02b, 0x00073333, + 0x02c, 0x0000000d, + 0x02a, 0x00000006, + 0x02b, 0x00000709, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000007, + 0x02b, 0x00000709, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x00000008, + 0x02b, 0x0000060a, + 0x02b, 0x0004b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000009, + 0x02b, 0x0000060a, + 0x02b, 0x00053333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000a, + 0x02b, 0x0000060a, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000b, + 0x02b, 0x0000060a, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000c, + 0x02b, 0x0000060a, + 0x02b, 0x0006b333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000d, + 0x02b, 0x0000060a, + 0x02b, 0x00073333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000e, + 0x02b, 0x0000050b, + 0x02b, 0x00066666, + 0x02c, 0x0000001a, + 0x02a, 0x000e0000, + 0x010, 0x0004000f, + 0x011, 0x000e31fc, + 0x010, 0x0006000f, + 0x011, 0x000ff9f8, + 0x010, 0x0002000f, + 0x011, 0x000203f9, + 0x010, 0x0003000f, + 0x011, 0x000ff500, + 0x010, 0x00000000, + 0x011, 0x00000000, + 0x010, 0x0008000f, + 0x011, 0x0003f100, + 0x010, 0x0009000f, + 0x011, 0x00023100, + 0x012, 0x00032000, + 0x012, 
0x00071000, + 0x012, 0x000b0000, + 0x012, 0x000fc000, + 0x013, 0x000287af, + 0x013, 0x000244b7, + 0x013, 0x000204ab, + 0x013, 0x0001c49f, + 0x013, 0x00018493, + 0x013, 0x00014297, + 0x013, 0x00010295, + 0x013, 0x0000c298, + 0x013, 0x0000819c, + 0x013, 0x000040a8, + 0x013, 0x0000001c, + 0x014, 0x0001944c, + 0x014, 0x00059444, + 0x014, 0x0009944c, + 0x014, 0x000d9444, + 0x015, 0x0000f424, + 0x015, 0x0004f424, + 0x015, 0x0008f424, + 0x015, 0x000cf424, + 0x016, 0x000e0330, + 0x016, 0x000a0330, + 0x016, 0x00060330, + 0x016, 0x00020330, + 0x000, 0x00010159, + 0x018, 0x0000f401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01f, 0x00080003, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01e, 0x00044457, + 0x01f, 0x00080000, + 0x000, 0x00030159, +}; + +u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH] = { + 0x000, 0x00030159, + 0x001, 0x00031284, + 0x002, 0x00098000, + 0x003, 0x00018c63, + 0x004, 0x000210e7, + 0x009, 0x0002044f, + 0x00a, 0x0001adb1, + 0x00b, 0x00054867, + 0x00c, 0x0008992e, + 0x00d, 0x0000e52c, + 0x00e, 0x00039ce7, + 0x00f, 0x00000451, + 0x012, 0x00032000, + 0x012, 0x00071000, + 0x012, 0x000b0000, + 0x012, 0x000fc000, + 0x013, 0x000287af, + 0x013, 0x000244b7, + 0x013, 0x000204ab, + 0x013, 0x0001c49f, + 0x013, 0x00018493, + 0x013, 0x00014297, + 0x013, 0x00010295, + 0x013, 0x0000c298, + 0x013, 0x0000819c, + 0x013, 0x000040a8, + 0x013, 0x0000001c, + 0x014, 0x0001944c, + 0x014, 0x00059444, + 0x014, 0x0009944c, + 0x014, 0x000d9444, + 0x015, 0x0000f424, + 0x015, 0x0004f424, + 0x015, 0x0008f424, + 0x015, 0x000cf424, + 0x016, 0x000e0330, + 0x016, 0x000a0330, + 0x016, 0x00060330, + 0x016, 0x00020330, +}; + +u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH] = { + 0x000, 0x00030159, + 0x001, 0x00031284, + 0x002, 0x00098000, + 0x003, 0x00018c63, + 0x004, 0x000210e7, + 0x009, 0x0002044f, + 0x00a, 0x0001adb1, + 0x00b, 0x00054867, + 0x00c, 0x0008992e, + 0x00d, 0x0000e52c, + 0x00e, 0x00039ce7, + 0x00f, 0x00000451, + 0x019, 0x00000000, + 0x01a, 0x00010255, + 0x01b, 0x00060a00, + 0x01c, 0x000fc378, + 0x01d, 0x000a1250, + 0x01e, 0x0004445f, + 0x01f, 0x00080001, + 0x020, 0x0000b614, + 0x021, 0x0006c000, + 0x022, 0x00000000, + 0x023, 0x00001558, + 0x024, 0x00000060, + 0x025, 0x00000483, + 0x026, 0x0004f000, + 0x027, 0x000ec7d9, + 0x028, 0x000577c0, + 0x029, 0x00004783, + 0x02a, 0x00000001, + 0x02b, 0x00021334, + 0x02a, 0x00000000, + 0x02b, 0x00000054, + 0x02a, 0x00000001, + 0x02b, 0x00000808, + 0x02b, 0x00053333, + 0x02c, 0x0000000c, + 0x02a, 0x00000002, + 0x02b, 0x00000808, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000003, + 0x02b, 0x00000808, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x00000004, + 0x02b, 0x00000808, + 0x02b, 0x0006b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000005, + 0x02b, 0x00000808, + 0x02b, 0x00073333, + 0x02c, 0x0000000d, + 0x02a, 0x00000006, + 0x02b, 0x00000709, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000007, + 0x02b, 0x00000709, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x00000008, + 0x02b, 0x0000060a, + 0x02b, 0x0004b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000009, + 0x02b, 0x0000060a, + 0x02b, 0x00053333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000a, + 0x02b, 0x0000060a, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000b, + 0x02b, 0x0000060a, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000c, + 0x02b, 0x0000060a, + 0x02b, 0x0006b333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000d, + 0x02b, 0x0000060a, + 0x02b, 0x00073333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000e, + 0x02b, 0x0000050b, + 0x02b, 
0x00066666, + 0x02c, 0x0000001a, + 0x02a, 0x000e0000, + 0x010, 0x0004000f, + 0x011, 0x000e31fc, + 0x010, 0x0006000f, + 0x011, 0x000ff9f8, + 0x010, 0x0002000f, + 0x011, 0x000203f9, + 0x010, 0x0003000f, + 0x011, 0x000ff500, + 0x010, 0x00000000, + 0x011, 0x00000000, + 0x010, 0x0008000f, + 0x011, 0x0003f100, + 0x010, 0x0009000f, + 0x011, 0x00023100, + 0x012, 0x00032000, + 0x012, 0x00071000, + 0x012, 0x000b0000, + 0x012, 0x000fc000, + 0x013, 0x000287b3, + 0x013, 0x000244b7, + 0x013, 0x000204ab, + 0x013, 0x0001c49f, + 0x013, 0x00018493, + 0x013, 0x0001429b, + 0x013, 0x00010299, + 0x013, 0x0000c29c, + 0x013, 0x000081a0, + 0x013, 0x000040ac, + 0x013, 0x00000020, + 0x014, 0x0001944c, + 0x014, 0x00059444, + 0x014, 0x0009944c, + 0x014, 0x000d9444, + 0x015, 0x0000f405, + 0x015, 0x0004f405, + 0x015, 0x0008f405, + 0x015, 0x000cf405, + 0x016, 0x000e0330, + 0x016, 0x000a0330, + 0x016, 0x00060330, + 0x016, 0x00020330, + 0x000, 0x00010159, + 0x018, 0x0000f401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01f, 0x00080003, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01e, 0x00044457, + 0x01f, 0x00080000, + 0x000, 0x00030159, +}; + +u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH] = { + 0x0, +}; + +u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH] = { + 0x420, 0x00000080, + 0x423, 0x00000000, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000006, + 0x437, 0x00000007, + 0x438, 0x00000000, + 0x439, 0x00000000, + 0x43a, 0x00000000, + 0x43b, 0x00000001, + 0x43c, 0x00000004, + 0x43d, 0x00000005, + 0x43e, 0x00000006, + 0x43f, 0x00000007, + 0x440, 0x0000005d, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000015, + 0x445, 0x000000f0, + 0x446, 0x0000000f, + 0x447, 0x00000000, + 0x458, 0x00000041, + 0x459, 0x000000a8, + 0x45a, 0x00000072, + 0x45b, 0x000000b9, + 0x460, 0x00000066, + 0x461, 0x00000066, + 0x462, 0x00000008, + 0x463, 0x00000003, + 0x4c8, 0x000000ff, + 0x4c9, 0x00000008, + 0x4cc, 0x000000ff, + 0x4cd, 0x000000ff, + 0x4ce, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000a2, + 0x502, 0x0000002f, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000a3, + 0x506, 0x0000005e, + 0x507, 0x00000000, + 0x508, 0x0000002b, + 0x509, 0x000000a4, + 0x50a, 0x0000005e, + 0x50b, 0x00000000, + 0x50c, 0x0000004f, + 0x50d, 0x000000a4, + 0x50e, 0x00000000, + 0x50f, 0x00000000, + 0x512, 0x0000001c, + 0x514, 0x0000000a, + 0x515, 0x00000010, + 0x516, 0x0000000a, + 0x517, 0x00000010, + 0x51a, 0x00000016, + 0x524, 0x0000000f, + 0x525, 0x0000004f, + 0x546, 0x00000040, + 0x547, 0x00000000, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55a, 0x00000002, + 0x55d, 0x000000ff, + 0x605, 0x00000030, + 0x608, 0x0000000e, + 0x609, 0x0000002a, + 0x652, 0x00000020, + 0x63c, 0x0000000a, + 0x63d, 0x0000000e, + 0x63e, 0x0000000a, + 0x63f, 0x0000000e, + 0x66e, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70a, 0x00000065, + 0x70b, 0x00000087, +}; + +u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH] = { + 0xc78, 0x7b000001, + 0xc78, 0x7b010001, + 0xc78, 0x7b020001, + 0xc78, 0x7b030001, + 0xc78, 0x7b040001, + 0xc78, 0x7b050001, + 0xc78, 0x7a060001, + 0xc78, 0x79070001, + 0xc78, 0x78080001, + 0xc78, 0x77090001, + 0xc78, 0x760a0001, + 0xc78, 0x750b0001, + 0xc78, 0x740c0001, + 0xc78, 0x730d0001, + 0xc78, 0x720e0001, + 0xc78, 0x710f0001, + 0xc78, 0x70100001, + 0xc78, 0x6f110001, + 0xc78, 0x6e120001, + 0xc78, 
0x6d130001, + 0xc78, 0x6c140001, + 0xc78, 0x6b150001, + 0xc78, 0x6a160001, + 0xc78, 0x69170001, + 0xc78, 0x68180001, + 0xc78, 0x67190001, + 0xc78, 0x661a0001, + 0xc78, 0x651b0001, + 0xc78, 0x641c0001, + 0xc78, 0x631d0001, + 0xc78, 0x621e0001, + 0xc78, 0x611f0001, + 0xc78, 0x60200001, + 0xc78, 0x49210001, + 0xc78, 0x48220001, + 0xc78, 0x47230001, + 0xc78, 0x46240001, + 0xc78, 0x45250001, + 0xc78, 0x44260001, + 0xc78, 0x43270001, + 0xc78, 0x42280001, + 0xc78, 0x41290001, + 0xc78, 0x402a0001, + 0xc78, 0x262b0001, + 0xc78, 0x252c0001, + 0xc78, 0x242d0001, + 0xc78, 0x232e0001, + 0xc78, 0x222f0001, + 0xc78, 0x21300001, + 0xc78, 0x20310001, + 0xc78, 0x06320001, + 0xc78, 0x05330001, + 0xc78, 0x04340001, + 0xc78, 0x03350001, + 0xc78, 0x02360001, + 0xc78, 0x01370001, + 0xc78, 0x00380001, + 0xc78, 0x00390001, + 0xc78, 0x003a0001, + 0xc78, 0x003b0001, + 0xc78, 0x003c0001, + 0xc78, 0x003d0001, + 0xc78, 0x003e0001, + 0xc78, 0x003f0001, + 0xc78, 0x7b400001, + 0xc78, 0x7b410001, + 0xc78, 0x7b420001, + 0xc78, 0x7b430001, + 0xc78, 0x7b440001, + 0xc78, 0x7b450001, + 0xc78, 0x7a460001, + 0xc78, 0x79470001, + 0xc78, 0x78480001, + 0xc78, 0x77490001, + 0xc78, 0x764a0001, + 0xc78, 0x754b0001, + 0xc78, 0x744c0001, + 0xc78, 0x734d0001, + 0xc78, 0x724e0001, + 0xc78, 0x714f0001, + 0xc78, 0x70500001, + 0xc78, 0x6f510001, + 0xc78, 0x6e520001, + 0xc78, 0x6d530001, + 0xc78, 0x6c540001, + 0xc78, 0x6b550001, + 0xc78, 0x6a560001, + 0xc78, 0x69570001, + 0xc78, 0x68580001, + 0xc78, 0x67590001, + 0xc78, 0x665a0001, + 0xc78, 0x655b0001, + 0xc78, 0x645c0001, + 0xc78, 0x635d0001, + 0xc78, 0x625e0001, + 0xc78, 0x615f0001, + 0xc78, 0x60600001, + 0xc78, 0x49610001, + 0xc78, 0x48620001, + 0xc78, 0x47630001, + 0xc78, 0x46640001, + 0xc78, 0x45650001, + 0xc78, 0x44660001, + 0xc78, 0x43670001, + 0xc78, 0x42680001, + 0xc78, 0x41690001, + 0xc78, 0x406a0001, + 0xc78, 0x266b0001, + 0xc78, 0x256c0001, + 0xc78, 0x246d0001, + 0xc78, 0x236e0001, + 0xc78, 0x226f0001, + 0xc78, 0x21700001, + 0xc78, 0x20710001, + 0xc78, 0x06720001, + 0xc78, 0x05730001, + 0xc78, 0x04740001, + 0xc78, 0x03750001, + 0xc78, 0x02760001, + 0xc78, 0x01770001, + 0xc78, 0x00780001, + 0xc78, 0x00790001, + 0xc78, 0x007a0001, + 0xc78, 0x007b0001, + 0xc78, 0x007c0001, + 0xc78, 0x007d0001, + 0xc78, 0x007e0001, + 0xc78, 0x007f0001, + 0xc78, 0x3800001e, + 0xc78, 0x3801001e, + 0xc78, 0x3802001e, + 0xc78, 0x3803001e, + 0xc78, 0x3804001e, + 0xc78, 0x3805001e, + 0xc78, 0x3806001e, + 0xc78, 0x3807001e, + 0xc78, 0x3808001e, + 0xc78, 0x3c09001e, + 0xc78, 0x3e0a001e, + 0xc78, 0x400b001e, + 0xc78, 0x440c001e, + 0xc78, 0x480d001e, + 0xc78, 0x4c0e001e, + 0xc78, 0x500f001e, + 0xc78, 0x5210001e, + 0xc78, 0x5611001e, + 0xc78, 0x5a12001e, + 0xc78, 0x5e13001e, + 0xc78, 0x6014001e, + 0xc78, 0x6015001e, + 0xc78, 0x6016001e, + 0xc78, 0x6217001e, + 0xc78, 0x6218001e, + 0xc78, 0x6219001e, + 0xc78, 0x621a001e, + 0xc78, 0x621b001e, + 0xc78, 0x621c001e, + 0xc78, 0x621d001e, + 0xc78, 0x621e001e, + 0xc78, 0x621f001e, +}; + +u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH] = { + 0xc78, 0x7b000001, + 0xc78, 0x7b010001, + 0xc78, 0x7b020001, + 0xc78, 0x7b030001, + 0xc78, 0x7b040001, + 0xc78, 0x7b050001, + 0xc78, 0x7a060001, + 0xc78, 0x79070001, + 0xc78, 0x78080001, + 0xc78, 0x77090001, + 0xc78, 0x760a0001, + 0xc78, 0x750b0001, + 0xc78, 0x740c0001, + 0xc78, 0x730d0001, + 0xc78, 0x720e0001, + 0xc78, 0x710f0001, + 0xc78, 0x70100001, + 0xc78, 0x6f110001, + 0xc78, 0x6e120001, + 0xc78, 0x6d130001, + 0xc78, 0x6c140001, + 0xc78, 0x6b150001, + 0xc78, 0x6a160001, + 0xc78, 0x69170001, + 0xc78, 0x68180001, + 
0xc78, 0x67190001, + 0xc78, 0x661a0001, + 0xc78, 0x651b0001, + 0xc78, 0x641c0001, + 0xc78, 0x631d0001, + 0xc78, 0x621e0001, + 0xc78, 0x611f0001, + 0xc78, 0x60200001, + 0xc78, 0x49210001, + 0xc78, 0x48220001, + 0xc78, 0x47230001, + 0xc78, 0x46240001, + 0xc78, 0x45250001, + 0xc78, 0x44260001, + 0xc78, 0x43270001, + 0xc78, 0x42280001, + 0xc78, 0x41290001, + 0xc78, 0x402a0001, + 0xc78, 0x262b0001, + 0xc78, 0x252c0001, + 0xc78, 0x242d0001, + 0xc78, 0x232e0001, + 0xc78, 0x222f0001, + 0xc78, 0x21300001, + 0xc78, 0x20310001, + 0xc78, 0x06320001, + 0xc78, 0x05330001, + 0xc78, 0x04340001, + 0xc78, 0x03350001, + 0xc78, 0x02360001, + 0xc78, 0x01370001, + 0xc78, 0x00380001, + 0xc78, 0x00390001, + 0xc78, 0x003a0001, + 0xc78, 0x003b0001, + 0xc78, 0x003c0001, + 0xc78, 0x003d0001, + 0xc78, 0x003e0001, + 0xc78, 0x003f0001, + 0xc78, 0x7b400001, + 0xc78, 0x7b410001, + 0xc78, 0x7b420001, + 0xc78, 0x7b430001, + 0xc78, 0x7b440001, + 0xc78, 0x7b450001, + 0xc78, 0x7a460001, + 0xc78, 0x79470001, + 0xc78, 0x78480001, + 0xc78, 0x77490001, + 0xc78, 0x764a0001, + 0xc78, 0x754b0001, + 0xc78, 0x744c0001, + 0xc78, 0x734d0001, + 0xc78, 0x724e0001, + 0xc78, 0x714f0001, + 0xc78, 0x70500001, + 0xc78, 0x6f510001, + 0xc78, 0x6e520001, + 0xc78, 0x6d530001, + 0xc78, 0x6c540001, + 0xc78, 0x6b550001, + 0xc78, 0x6a560001, + 0xc78, 0x69570001, + 0xc78, 0x68580001, + 0xc78, 0x67590001, + 0xc78, 0x665a0001, + 0xc78, 0x655b0001, + 0xc78, 0x645c0001, + 0xc78, 0x635d0001, + 0xc78, 0x625e0001, + 0xc78, 0x615f0001, + 0xc78, 0x60600001, + 0xc78, 0x49610001, + 0xc78, 0x48620001, + 0xc78, 0x47630001, + 0xc78, 0x46640001, + 0xc78, 0x45650001, + 0xc78, 0x44660001, + 0xc78, 0x43670001, + 0xc78, 0x42680001, + 0xc78, 0x41690001, + 0xc78, 0x406a0001, + 0xc78, 0x266b0001, + 0xc78, 0x256c0001, + 0xc78, 0x246d0001, + 0xc78, 0x236e0001, + 0xc78, 0x226f0001, + 0xc78, 0x21700001, + 0xc78, 0x20710001, + 0xc78, 0x06720001, + 0xc78, 0x05730001, + 0xc78, 0x04740001, + 0xc78, 0x03750001, + 0xc78, 0x02760001, + 0xc78, 0x01770001, + 0xc78, 0x00780001, + 0xc78, 0x00790001, + 0xc78, 0x007a0001, + 0xc78, 0x007b0001, + 0xc78, 0x007c0001, + 0xc78, 0x007d0001, + 0xc78, 0x007e0001, + 0xc78, 0x007f0001, + 0xc78, 0x3800001e, + 0xc78, 0x3801001e, + 0xc78, 0x3802001e, + 0xc78, 0x3803001e, + 0xc78, 0x3804001e, + 0xc78, 0x3805001e, + 0xc78, 0x3806001e, + 0xc78, 0x3807001e, + 0xc78, 0x3808001e, + 0xc78, 0x3c09001e, + 0xc78, 0x3e0a001e, + 0xc78, 0x400b001e, + 0xc78, 0x440c001e, + 0xc78, 0x480d001e, + 0xc78, 0x4c0e001e, + 0xc78, 0x500f001e, + 0xc78, 0x5210001e, + 0xc78, 0x5611001e, + 0xc78, 0x5a12001e, + 0xc78, 0x5e13001e, + 0xc78, 0x6014001e, + 0xc78, 0x6015001e, + 0xc78, 0x6016001e, + 0xc78, 0x6217001e, + 0xc78, 0x6218001e, + 0xc78, 0x6219001e, + 0xc78, 0x621a001e, + 0xc78, 0x621b001e, + 0xc78, 0x621c001e, + 0xc78, 0x621d001e, + 0xc78, 0x621e001e, + 0xc78, 0x621f001e, +}; + +u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength] = { + 0x024, 0x0011800f, + 0x028, 0x00ffdb83, + 0x040, 0x000c0004, + 0x800, 0x80040000, + 0x804, 0x00000001, + 0x808, 0x0000fc00, + 0x80c, 0x0000000a, + 0x810, 0x10005388, + 0x814, 0x020c3d10, + 0x818, 0x02200385, + 0x81c, 0x00000000, + 0x820, 0x01000100, + 0x824, 0x00390204, + 0x828, 0x00000000, + 0x82c, 0x00000000, + 0x830, 0x00000000, + 0x834, 0x00000000, + 0x838, 0x00000000, + 0x83c, 0x00000000, + 0x840, 0x00010000, + 0x844, 0x00000000, + 0x848, 0x00000000, + 0x84c, 0x00000000, + 0x850, 0x00000000, + 0x854, 0x00000000, + 0x858, 0x569a569a, + 0x85c, 0x001b25a4, + 0x860, 0x66e60230, + 0x864, 0x061f0130, + 0x868, 0x00000000, + 0x86c, 
0x20202000, + 0x870, 0x03000300, + 0x874, 0x22004000, + 0x878, 0x00000808, + 0x87c, 0x00ffc3f1, + 0x880, 0xc0083070, + 0x884, 0x000004d5, + 0x888, 0x00000000, + 0x88c, 0xccc000c0, + 0x890, 0x00000800, + 0x894, 0xfffffffe, + 0x898, 0x40302010, + 0x89c, 0x00706050, + 0x900, 0x00000000, + 0x904, 0x00000023, + 0x908, 0x00000000, + 0x90c, 0x81121111, + 0xa00, 0x00d047c8, + 0xa04, 0x80ff000c, + 0xa08, 0x8c838300, + 0xa0c, 0x2e68120f, + 0xa10, 0x9500bb78, + 0xa14, 0x11144028, + 0xa18, 0x00881117, + 0xa1c, 0x89140f00, + 0xa20, 0x15160000, + 0xa24, 0x070b0f12, + 0xa28, 0x00000104, + 0xa2c, 0x00d30000, + 0xa70, 0x101fbf00, + 0xa74, 0x00000007, + 0xc00, 0x48071d40, + 0xc04, 0x03a05611, + 0xc08, 0x000000e4, + 0xc0c, 0x6c6c6c6c, + 0xc10, 0x08800000, + 0xc14, 0x40000100, + 0xc18, 0x08800000, + 0xc1c, 0x40000100, + 0xc20, 0x00000000, + 0xc24, 0x00000000, + 0xc28, 0x00000000, + 0xc2c, 0x00000000, + 0xc30, 0x69e9ac44, + 0xc34, 0x469652cf, + 0xc38, 0x49795994, + 0xc3c, 0x0a97971c, + 0xc40, 0x1f7c403f, + 0xc44, 0x000100b7, + 0xc48, 0xec020107, + 0xc4c, 0x007f037f, + 0xc50, 0x6954342e, + 0xc54, 0x43bc0094, + 0xc58, 0x6954342f, + 0xc5c, 0x433c0094, + 0xc60, 0x00000000, + 0xc64, 0x5116848b, + 0xc68, 0x47c00bff, + 0xc6c, 0x00000036, + 0xc70, 0x2c46000d, + 0xc74, 0x018610db, + 0xc78, 0x0000001f, + 0xc7c, 0x00b91612, + 0xc80, 0x24000090, + 0xc84, 0x20f60000, + 0xc88, 0x24000090, + 0xc8c, 0x20200000, + 0xc90, 0x00121820, + 0xc94, 0x00000000, + 0xc98, 0x00121820, + 0xc9c, 0x00007f7f, + 0xca0, 0x00000000, + 0xca4, 0x00000080, + 0xca8, 0x00000000, + 0xcac, 0x00000000, + 0xcb0, 0x00000000, + 0xcb4, 0x00000000, + 0xcb8, 0x00000000, + 0xcbc, 0x28000000, + 0xcc0, 0x00000000, + 0xcc4, 0x00000000, + 0xcc8, 0x00000000, + 0xccc, 0x00000000, + 0xcd0, 0x00000000, + 0xcd4, 0x00000000, + 0xcd8, 0x64b22427, + 0xcdc, 0x00766932, + 0xce0, 0x00222222, + 0xce4, 0x00000000, + 0xce8, 0x37644302, + 0xcec, 0x2f97d40c, + 0xd00, 0x00080740, + 0xd04, 0x00020401, + 0xd08, 0x0000907f, + 0xd0c, 0x20010201, + 0xd10, 0xa0633333, + 0xd14, 0x3333bc43, + 0xd18, 0x7a8f5b6b, + 0xd2c, 0xcc979975, + 0xd30, 0x00000000, + 0xd34, 0x80608000, + 0xd38, 0x00000000, + 0xd3c, 0x00027293, + 0xd40, 0x00000000, + 0xd44, 0x00000000, + 0xd48, 0x00000000, + 0xd4c, 0x00000000, + 0xd50, 0x6437140a, + 0xd54, 0x00000000, + 0xd58, 0x00000000, + 0xd5c, 0x30032064, + 0xd60, 0x4653de68, + 0xd64, 0x04518a3c, + 0xd68, 0x00002101, + 0xd6c, 0x2a201c16, + 0xd70, 0x1812362e, + 0xd74, 0x322c2220, + 0xd78, 0x000e3c24, + 0xe00, 0x24242424, + 0xe04, 0x24242424, + 0xe08, 0x03902024, + 0xe10, 0x24242424, + 0xe14, 0x24242424, + 0xe18, 0x24242424, + 0xe1c, 0x24242424, + 0xe28, 0x00000000, + 0xe30, 0x1000dc1f, + 0xe34, 0x10008c1f, + 0xe38, 0x02140102, + 0xe3c, 0x681604c2, + 0xe40, 0x01007c00, + 0xe44, 0x01004800, + 0xe48, 0xfb000000, + 0xe4c, 0x000028d1, + 0xe50, 0x1000dc1f, + 0xe54, 0x10008c1f, + 0xe58, 0x02140102, + 0xe5c, 0x28160d05, + 0xe60, 0x00000008, + 0xe68, 0x001b25a4, + 0xe6c, 0x631b25a0, + 0xe70, 0x631b25a0, + 0xe74, 0x081b25a0, + 0xe78, 0x081b25a0, + 0xe7c, 0x081b25a0, + 0xe80, 0x081b25a0, + 0xe84, 0x631b25a0, + 0xe88, 0x081b25a0, + 0xe8c, 0x631b25a0, + 0xed0, 0x631b25a0, + 0xed4, 0x631b25a0, + 0xed8, 0x631b25a0, + 0xedc, 0x001b25a0, + 0xee0, 0x001b25a0, + 0xeec, 0x6b1b25a0, + 0xee8, 0x31555448, + 0xf14, 0x00000003, + 0xf4c, 0x00000000, + 0xf00, 0x00000300, +}; + +u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength] = { + 0xe00, 0xffffffff, 0x06080808, + 0xe04, 0xffffffff, 0x00040406, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 
0xffffffff, 0x04060608, + 0xe14, 0xffffffff, 0x00020204, + 0xe18, 0xffffffff, 0x04060608, + 0xe1c, 0xffffffff, 0x00020204, + 0x830, 0xffffffff, 0x06080808, + 0x834, 0xffffffff, 0x00040406, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x04060608, + 0x848, 0xffffffff, 0x00020204, + 0x84c, 0xffffffff, 0x04060608, + 0x868, 0xffffffff, 0x00020204, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, 
+}; + +u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength] = { + 0x000, 0x00030159, + 0x001, 0x00031284, + 0x002, 0x00098000, + 0x003, 0x00018c63, + 0x004, 0x000210e7, + 0x009, 0x0002044f, + 0x00a, 0x0001adb0, + 0x00b, 0x00054867, + 0x00c, 0x0008992e, + 0x00d, 0x0000e529, + 0x00e, 0x00039ce7, + 0x00f, 0x00000451, + 0x019, 0x00000000, + 0x01a, 0x00000255, + 0x01b, 0x00060a00, + 0x01c, 0x000fc378, + 0x01d, 0x000a1250, + 0x01e, 0x0004445f, + 0x01f, 0x00080001, + 0x020, 0x0000b614, + 0x021, 0x0006c000, + 0x022, 0x0000083c, + 0x023, 0x00001558, + 0x024, 0x00000060, + 0x025, 0x00000483, + 0x026, 0x0004f000, + 0x027, 0x000ec7d9, + 0x028, 0x000977c0, + 0x029, 0x00004783, + 0x02a, 0x00000001, + 0x02b, 0x00021334, + 0x02a, 0x00000000, + 0x02b, 0x00000054, + 0x02a, 0x00000001, + 0x02b, 0x00000808, + 0x02b, 0x00053333, + 0x02c, 0x0000000c, + 0x02a, 0x00000002, + 0x02b, 0x00000808, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000003, + 0x02b, 0x00000808, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x00000004, + 0x02b, 0x00000808, + 0x02b, 0x0006b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000005, + 0x02b, 0x00000808, + 0x02b, 0x00073333, + 0x02c, 0x0000000d, + 0x02a, 0x00000006, + 0x02b, 0x00000709, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000007, + 0x02b, 0x00000709, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x00000008, + 0x02b, 0x0000060a, + 0x02b, 0x0004b333, + 0x02c, 0x0000000d, + 0x02a, 0x00000009, + 0x02b, 0x0000060a, + 0x02b, 0x00053333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000a, + 0x02b, 0x0000060a, + 0x02b, 0x0005b333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000b, + 0x02b, 0x0000060a, + 0x02b, 0x00063333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000c, + 0x02b, 0x0000060a, + 0x02b, 0x0006b333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000d, + 0x02b, 0x0000060a, + 0x02b, 0x00073333, + 0x02c, 0x0000000d, + 0x02a, 0x0000000e, + 0x02b, 0x0000050b, + 0x02b, 0x00066666, + 0x02c, 0x0000001a, + 0x02a, 0x000e0000, + 0x010, 0x0004000f, + 0x011, 0x000e31fc, + 0x010, 0x0006000f, + 0x011, 0x000ff9f8, + 0x010, 0x0002000f, + 0x011, 0x000203f9, + 0x010, 0x0003000f, + 0x011, 0x000ff500, + 0x010, 0x00000000, + 0x011, 0x00000000, + 0x010, 0x0008000f, + 0x011, 0x0003f100, + 0x010, 0x0009000f, + 0x011, 0x00023100, + 0x012, 0x000d8000, + 0x012, 0x00090000, + 0x012, 0x00051000, + 0x012, 0x00012000, + 0x013, 0x00028fb4, + 0x013, 0x00024fa8, + 0x013, 0x000207a4, + 0x013, 0x0001c798, + 0x013, 0x000183a4, + 0x013, 0x00014398, + 0x013, 0x000101a4, + 0x013, 0x0000c198, + 0x013, 0x000080a4, + 0x013, 0x00004098, + 0x013, 0x00000000, + 0x014, 0x0001944c, + 0x014, 0x00059444, + 0x014, 0x0009944c, + 0x014, 0x000d9444, + 0x015, 0x0000f405, + 0x015, 0x0004f405, + 0x015, 0x0008f405, + 0x015, 0x000cf405, + 0x016, 0x000e0330, + 0x016, 0x000a0330, + 0x016, 0x00060330, + 0x016, 0x00020330, + 0x000, 0x00010159, + 0x018, 0x0000f401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01f, 0x00080003, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01e, 0x00044457, + 0x01f, 0x00080000, + 0x000, 0x00030159, +}; + +u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength] = { + 0xc78, 0x7b000001, + 0xc78, 0x7b010001, + 0xc78, 0x7b020001, + 0xc78, 0x7b030001, + 0xc78, 0x7b040001, + 0xc78, 0x7b050001, + 0xc78, 0x7b060001, + 0xc78, 0x7b070001, + 0xc78, 0x7b080001, + 0xc78, 0x7a090001, + 0xc78, 0x790a0001, + 0xc78, 0x780b0001, + 0xc78, 0x770c0001, + 0xc78, 0x760d0001, + 0xc78, 0x750e0001, + 0xc78, 0x740f0001, + 0xc78, 0x73100001, + 0xc78, 0x72110001, + 0xc78, 0x71120001, + 0xc78, 0x70130001, + 0xc78, 0x6f140001, 
+ 0xc78, 0x6e150001, + 0xc78, 0x6d160001, + 0xc78, 0x6c170001, + 0xc78, 0x6b180001, + 0xc78, 0x6a190001, + 0xc78, 0x691a0001, + 0xc78, 0x681b0001, + 0xc78, 0x671c0001, + 0xc78, 0x661d0001, + 0xc78, 0x651e0001, + 0xc78, 0x641f0001, + 0xc78, 0x63200001, + 0xc78, 0x62210001, + 0xc78, 0x61220001, + 0xc78, 0x60230001, + 0xc78, 0x46240001, + 0xc78, 0x45250001, + 0xc78, 0x44260001, + 0xc78, 0x43270001, + 0xc78, 0x42280001, + 0xc78, 0x41290001, + 0xc78, 0x402a0001, + 0xc78, 0x262b0001, + 0xc78, 0x252c0001, + 0xc78, 0x242d0001, + 0xc78, 0x232e0001, + 0xc78, 0x222f0001, + 0xc78, 0x21300001, + 0xc78, 0x20310001, + 0xc78, 0x06320001, + 0xc78, 0x05330001, + 0xc78, 0x04340001, + 0xc78, 0x03350001, + 0xc78, 0x02360001, + 0xc78, 0x01370001, + 0xc78, 0x00380001, + 0xc78, 0x00390001, + 0xc78, 0x003a0001, + 0xc78, 0x003b0001, + 0xc78, 0x003c0001, + 0xc78, 0x003d0001, + 0xc78, 0x003e0001, + 0xc78, 0x003f0001, + 0xc78, 0x7b400001, + 0xc78, 0x7b410001, + 0xc78, 0x7b420001, + 0xc78, 0x7b430001, + 0xc78, 0x7b440001, + 0xc78, 0x7b450001, + 0xc78, 0x7b460001, + 0xc78, 0x7b470001, + 0xc78, 0x7b480001, + 0xc78, 0x7a490001, + 0xc78, 0x794a0001, + 0xc78, 0x784b0001, + 0xc78, 0x774c0001, + 0xc78, 0x764d0001, + 0xc78, 0x754e0001, + 0xc78, 0x744f0001, + 0xc78, 0x73500001, + 0xc78, 0x72510001, + 0xc78, 0x71520001, + 0xc78, 0x70530001, + 0xc78, 0x6f540001, + 0xc78, 0x6e550001, + 0xc78, 0x6d560001, + 0xc78, 0x6c570001, + 0xc78, 0x6b580001, + 0xc78, 0x6a590001, + 0xc78, 0x695a0001, + 0xc78, 0x685b0001, + 0xc78, 0x675c0001, + 0xc78, 0x665d0001, + 0xc78, 0x655e0001, + 0xc78, 0x645f0001, + 0xc78, 0x63600001, + 0xc78, 0x62610001, + 0xc78, 0x61620001, + 0xc78, 0x60630001, + 0xc78, 0x46640001, + 0xc78, 0x45650001, + 0xc78, 0x44660001, + 0xc78, 0x43670001, + 0xc78, 0x42680001, + 0xc78, 0x41690001, + 0xc78, 0x406a0001, + 0xc78, 0x266b0001, + 0xc78, 0x256c0001, + 0xc78, 0x246d0001, + 0xc78, 0x236e0001, + 0xc78, 0x226f0001, + 0xc78, 0x21700001, + 0xc78, 0x20710001, + 0xc78, 0x06720001, + 0xc78, 0x05730001, + 0xc78, 0x04740001, + 0xc78, 0x03750001, + 0xc78, 0x02760001, + 0xc78, 0x01770001, + 0xc78, 0x00780001, + 0xc78, 0x00790001, + 0xc78, 0x007a0001, + 0xc78, 0x007b0001, + 0xc78, 0x007c0001, + 0xc78, 0x007d0001, + 0xc78, 0x007e0001, + 0xc78, 0x007f0001, + 0xc78, 0x3800001e, + 0xc78, 0x3801001e, + 0xc78, 0x3802001e, + 0xc78, 0x3803001e, + 0xc78, 0x3804001e, + 0xc78, 0x3805001e, + 0xc78, 0x3806001e, + 0xc78, 0x3807001e, + 0xc78, 0x3808001e, + 0xc78, 0x3c09001e, + 0xc78, 0x3e0a001e, + 0xc78, 0x400b001e, + 0xc78, 0x440c001e, + 0xc78, 0x480d001e, + 0xc78, 0x4c0e001e, + 0xc78, 0x500f001e, + 0xc78, 0x5210001e, + 0xc78, 0x5611001e, + 0xc78, 0x5a12001e, + 0xc78, 0x5e13001e, + 0xc78, 0x6014001e, + 0xc78, 0x6015001e, + 0xc78, 0x6016001e, + 0xc78, 0x6217001e, + 0xc78, 0x6218001e, + 0xc78, 0x6219001e, + 0xc78, 0x621a001e, + 0xc78, 0x621b001e, + 0xc78, 0x621c001e, + 0xc78, 0x621d001e, + 0xc78, 0x621e001e, + 0xc78, 0x621f001e, +}; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/table.h b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h new file mode 100644 index 000000000000..c3d5cd826cfa --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/table.h @@ -0,0 +1,71 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#ifndef __RTL92CU_TABLE__H_ +#define __RTL92CU_TABLE__H_ + +#include <linux/types.h> + +#define RTL8192CUPHY_REG_2TARRAY_LENGTH 374 +extern u32 RTL8192CUPHY_REG_2TARRAY[RTL8192CUPHY_REG_2TARRAY_LENGTH]; +#define RTL8192CUPHY_REG_1TARRAY_LENGTH 374 +extern u32 RTL8192CUPHY_REG_1TARRAY[RTL8192CUPHY_REG_1TARRAY_LENGTH]; + +#define RTL8192CUPHY_REG_ARRAY_PGLENGTH 336 +extern u32 RTL8192CUPHY_REG_ARRAY_PG[RTL8192CUPHY_REG_ARRAY_PGLENGTH]; + +#define RTL8192CURADIOA_2TARRAYLENGTH 282 +extern u32 RTL8192CURADIOA_2TARRAY[RTL8192CURADIOA_2TARRAYLENGTH]; +#define RTL8192CURADIOB_2TARRAYLENGTH 78 +extern u32 RTL8192CU_RADIOB_2TARRAY[RTL8192CURADIOB_2TARRAYLENGTH]; +#define RTL8192CURADIOA_1TARRAYLENGTH 282 +extern u32 RTL8192CU_RADIOA_1TARRAY[RTL8192CURADIOA_1TARRAYLENGTH]; +#define RTL8192CURADIOB_1TARRAYLENGTH 1 +extern u32 RTL8192CU_RADIOB_1TARRAY[RTL8192CURADIOB_1TARRAYLENGTH]; + +#define RTL8192CUMAC_2T_ARRAYLENGTH 172 +extern u32 RTL8192CUMAC_2T_ARRAY[RTL8192CUMAC_2T_ARRAYLENGTH]; + +#define RTL8192CUAGCTAB_2TARRAYLENGTH 320 +extern u32 RTL8192CUAGCTAB_2TARRAY[RTL8192CUAGCTAB_2TARRAYLENGTH]; +#define RTL8192CUAGCTAB_1TARRAYLENGTH 320 +extern u32 RTL8192CUAGCTAB_1TARRAY[RTL8192CUAGCTAB_1TARRAYLENGTH]; + +#define RTL8192CUPHY_REG_1T_HPArrayLength 378 +extern u32 RTL8192CUPHY_REG_1T_HPArray[RTL8192CUPHY_REG_1T_HPArrayLength]; + +#define RTL8192CUPHY_REG_Array_PG_HPLength 336 +extern u32 RTL8192CUPHY_REG_Array_PG_HP[RTL8192CUPHY_REG_Array_PG_HPLength]; + +#define RTL8192CURadioA_1T_HPArrayLength 282 +extern u32 RTL8192CURadioA_1T_HPArray[RTL8192CURadioA_1T_HPArrayLength]; +#define RTL8192CUAGCTAB_1T_HPArrayLength 320 +extern u32 Rtl8192CUAGCTAB_1T_HPArray[RTL8192CUAGCTAB_1T_HPArrayLength]; + +#endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c new file mode 100644 index 000000000000..d0b0d43b9a6d --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c @@ -0,0 +1,687 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#include "../wifi.h" +#include "../usb.h" +#include "../ps.h" +#include "../base.h" +#include "reg.h" +#include "def.h" +#include "phy.h" +#include "rf.h" +#include "dm.h" +#include "mac.h" +#include "trx.h" + +static int _ConfigVerTOutEP(struct ieee80211_hw *hw) +{ + u8 ep_cfg, txqsele; + u8 ep_nums = 0; + + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); + + rtlusb->out_queue_sel = 0; + ep_cfg = rtl_read_byte(rtlpriv, REG_TEST_SIE_OPTIONAL); + ep_cfg = (ep_cfg & USB_TEST_EP_MASK) >> USB_TEST_EP_SHIFT; + switch (ep_cfg) { + case 0: /* 2 bulk OUT, 1 bulk IN */ + case 3: + rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_LQ; + ep_nums = 2; + break; + case 1: /* 1 bulk IN/OUT => map all endpoint to Low queue */ + case 2: /* 1 bulk IN, 1 bulk OUT => map all endpoint to High queue */ + txqsele = rtl_read_byte(rtlpriv, REG_TEST_USB_TXQS); + if (txqsele & 0x0F) /* /map all endpoint to High queue */ + rtlusb->out_queue_sel = TX_SELE_HQ; + else if (txqsele&0xF0) /* map all endpoint to Low queue */ + rtlusb->out_queue_sel = TX_SELE_LQ; + ep_nums = 1; + break; + default: + break; + } + return (rtlusb->out_ep_nums == ep_nums) ? 0 : -EINVAL; +} + +static int _ConfigVerNOutEP(struct ieee80211_hw *hw) +{ + u8 ep_cfg; + u8 ep_nums = 0; + + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); + + rtlusb->out_queue_sel = 0; + /* Normal and High queue */ + ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 1)); + if (ep_cfg & USB_NORMAL_SIE_EP_MASK) { + rtlusb->out_queue_sel |= TX_SELE_HQ; + ep_nums++; + } + if ((ep_cfg >> USB_NORMAL_SIE_EP_SHIFT) & USB_NORMAL_SIE_EP_MASK) { + rtlusb->out_queue_sel |= TX_SELE_NQ; + ep_nums++; + } + /* Low queue */ + ep_cfg = rtl_read_byte(rtlpriv, (REG_NORMAL_SIE_EP + 2)); + if (ep_cfg & USB_NORMAL_SIE_EP_MASK) { + rtlusb->out_queue_sel |= TX_SELE_LQ; + ep_nums++; + } + return (rtlusb->out_ep_nums == ep_nums) ? 
0 : -EINVAL; +} + +static void _TwoOutEpMapping(struct ieee80211_hw *hw, bool bIsChipB, + bool bwificfg, struct rtl_ep_map *ep_map) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (bwificfg) { /* for WMM */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("USB Chip-B & WMM Setting.....\n")); + ep_map->ep_mapping[RTL_TXQ_BE] = 2; + ep_map->ep_mapping[RTL_TXQ_BK] = 3; + ep_map->ep_mapping[RTL_TXQ_VI] = 3; + ep_map->ep_mapping[RTL_TXQ_VO] = 2; + ep_map->ep_mapping[RTL_TXQ_MGT] = 2; + ep_map->ep_mapping[RTL_TXQ_BCN] = 2; + ep_map->ep_mapping[RTL_TXQ_HI] = 2; + } else { /* typical setting */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("USB typical Setting.....\n")); + ep_map->ep_mapping[RTL_TXQ_BE] = 3; + ep_map->ep_mapping[RTL_TXQ_BK] = 3; + ep_map->ep_mapping[RTL_TXQ_VI] = 2; + ep_map->ep_mapping[RTL_TXQ_VO] = 2; + ep_map->ep_mapping[RTL_TXQ_MGT] = 2; + ep_map->ep_mapping[RTL_TXQ_BCN] = 2; + ep_map->ep_mapping[RTL_TXQ_HI] = 2; + } +} + +static void _ThreeOutEpMapping(struct ieee80211_hw *hw, bool bwificfg, + struct rtl_ep_map *ep_map) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + if (bwificfg) { /* for WMM */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("USB 3EP Setting for WMM.....\n")); + ep_map->ep_mapping[RTL_TXQ_BE] = 5; + ep_map->ep_mapping[RTL_TXQ_BK] = 3; + ep_map->ep_mapping[RTL_TXQ_VI] = 3; + ep_map->ep_mapping[RTL_TXQ_VO] = 2; + ep_map->ep_mapping[RTL_TXQ_MGT] = 2; + ep_map->ep_mapping[RTL_TXQ_BCN] = 2; + ep_map->ep_mapping[RTL_TXQ_HI] = 2; + } else { /* typical setting */ + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("USB 3EP Setting for typical.....\n")); + ep_map->ep_mapping[RTL_TXQ_BE] = 5; + ep_map->ep_mapping[RTL_TXQ_BK] = 5; + ep_map->ep_mapping[RTL_TXQ_VI] = 3; + ep_map->ep_mapping[RTL_TXQ_VO] = 2; + ep_map->ep_mapping[RTL_TXQ_MGT] = 2; + ep_map->ep_mapping[RTL_TXQ_BCN] = 2; + ep_map->ep_mapping[RTL_TXQ_HI] = 2; + } +} + +static void _OneOutEpMapping(struct ieee80211_hw *hw, struct rtl_ep_map *ep_map) +{ + ep_map->ep_mapping[RTL_TXQ_BE] = 2; + ep_map->ep_mapping[RTL_TXQ_BK] = 2; + ep_map->ep_mapping[RTL_TXQ_VI] = 2; + ep_map->ep_mapping[RTL_TXQ_VO] = 2; + ep_map->ep_mapping[RTL_TXQ_MGT] = 2; + ep_map->ep_mapping[RTL_TXQ_BCN] = 2; + ep_map->ep_mapping[RTL_TXQ_HI] = 2; +} +static int _out_ep_mapping(struct ieee80211_hw *hw) +{ + int err = 0; + bool bIsChipN, bwificfg = false; + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); + struct rtl_ep_map *ep_map = &(rtlusb->ep_map); + + bIsChipN = IS_NORMAL_CHIP(rtlhal->version); + switch (rtlusb->out_ep_nums) { + case 2: + _TwoOutEpMapping(hw, bIsChipN, bwificfg, ep_map); + break; + case 3: + /* Test chip doesn't support three out EPs. 
*/ + if (!bIsChipN) { + err = -EINVAL; + goto err_out; + } + _ThreeOutEpMapping(hw, bIsChipN, ep_map); + break; + case 1: + _OneOutEpMapping(hw, ep_map); + break; + default: + err = -EINVAL; + break; + } +err_out: + return err; + +} +/* endpoint mapping */ +int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw) +{ + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + int error = 0; + if (likely(IS_NORMAL_CHIP(rtlhal->version))) + error = _ConfigVerNOutEP(hw); + else + error = _ConfigVerTOutEP(hw); + if (error) + goto err_out; + error = _out_ep_mapping(hw); + if (error) + goto err_out; +err_out: + return error; +} + +u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index) +{ + u16 hw_queue_index; + + if (unlikely(ieee80211_is_beacon(fc))) { + hw_queue_index = RTL_TXQ_BCN; + goto out; + } + if (ieee80211_is_mgmt(fc)) { + hw_queue_index = RTL_TXQ_MGT; + goto out; + } + switch (mac80211_queue_index) { + case 0: + hw_queue_index = RTL_TXQ_VO; + break; + case 1: + hw_queue_index = RTL_TXQ_VI; + break; + case 2: + hw_queue_index = RTL_TXQ_BE; + break; + case 3: + hw_queue_index = RTL_TXQ_BK; + break; + default: + hw_queue_index = RTL_TXQ_BE; + RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n", + mac80211_queue_index)); + break; + } +out: + return hw_queue_index; +} + +static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw, + __le16 fc, u16 mac80211_queue_index) +{ + enum rtl_desc_qsel qsel; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (unlikely(ieee80211_is_beacon(fc))) { + qsel = QSLT_BEACON; + goto out; + } + if (ieee80211_is_mgmt(fc)) { + qsel = QSLT_MGNT; + goto out; + } + switch (mac80211_queue_index) { + case 0: /* VO */ + qsel = QSLT_VO; + RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG, + ("VO queue, set qsel = 0x%x\n", QSLT_VO)); + break; + case 1: /* VI */ + qsel = QSLT_VI; + RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG, + ("VI queue, set qsel = 0x%x\n", QSLT_VI)); + break; + case 3: /* BK */ + qsel = QSLT_BK; + RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG, + ("BK queue, set qsel = 0x%x\n", QSLT_BK)); + break; + case 2: /* BE */ + default: + qsel = QSLT_BE; + RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG, + ("BE queue, set qsel = 0x%x\n", QSLT_BE)); + break; + } +out: + return qsel; +} + +/* =============================================================== */ + +/*---------------------------------------------------------------------- + * + * Rx handler + * + *---------------------------------------------------------------------- */ +bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *stats, + struct ieee80211_rx_status *rx_status, + u8 *p_desc, struct sk_buff *skb) +{ + struct rx_fwinfo_92c *p_drvinfo; + struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; + u32 phystatus = GET_RX_DESC_PHY_STATUS(pdesc); + + stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); + stats->rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(pdesc) * + RX_DRV_INFO_SIZE_UNIT; + stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03); + stats->icv = (u16) GET_RX_DESC_ICV(pdesc); + stats->crc = (u16) GET_RX_DESC_CRC32(pdesc); + stats->hwerror = (stats->crc | stats->icv); + stats->decrypted = !GET_RX_DESC_SWDEC(pdesc); + stats->rate = (u8) GET_RX_DESC_RX_MCS(pdesc); + stats->shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc); + stats->isampdu = (bool) (GET_RX_DESC_PAGGR(pdesc) == 1); + stats->isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1) + && (GET_RX_DESC_FAGGR(pdesc) == 1)); + stats->timestamp_low = GET_RX_DESC_TSFL(pdesc); + stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc); + 
rx_status->freq = hw->conf.channel->center_freq; + rx_status->band = hw->conf.channel->band; + if (GET_RX_DESC_CRC32(pdesc)) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (!GET_RX_DESC_SWDEC(pdesc)) + rx_status->flag |= RX_FLAG_DECRYPTED; + if (GET_RX_DESC_BW(pdesc)) + rx_status->flag |= RX_FLAG_40MHZ; + if (GET_RX_DESC_RX_HT(pdesc)) + rx_status->flag |= RX_FLAG_HT; + rx_status->flag |= RX_FLAG_MACTIME_MPDU; + if (stats->decrypted) + rx_status->flag |= RX_FLAG_DECRYPTED; + rx_status->rate_idx = _rtl92c_rate_mapping(hw, + (bool)GET_RX_DESC_RX_HT(pdesc), + (u8)GET_RX_DESC_RX_MCS(pdesc), + (bool)GET_RX_DESC_PAGGR(pdesc)); + rx_status->mactime = GET_RX_DESC_TSFL(pdesc); + if (phystatus == true) { + p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE); + rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc, + p_drvinfo); + } + /*rx_status->qual = stats->signal; */ + rx_status->signal = stats->rssi + 10; + /*rx_status->noise = -stats->noise; */ + return true; +} + +#define RTL_RX_DRV_INFO_UNIT 8 + +static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status = + (struct ieee80211_rx_status *)IEEE80211_SKB_RXCB(skb); + u32 skb_len, pkt_len, drvinfo_len; + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *rxdesc; + struct rtl_stats stats = { + .signal = 0, + .noise = -98, + .rate = 0, + }; + struct rx_fwinfo_92c *p_drvinfo; + bool bv; + __le16 fc; + struct ieee80211_hdr *hdr; + + memset(rx_status, 0, sizeof(rx_status)); + rxdesc = skb->data; + skb_len = skb->len; + drvinfo_len = (GET_RX_DESC_DRVINFO_SIZE(rxdesc) * RTL_RX_DRV_INFO_UNIT); + pkt_len = GET_RX_DESC_PKT_LEN(rxdesc); + /* TODO: Error recovery. drop this skb or something. */ + WARN_ON(skb_len < (pkt_len + RTL_RX_DESC_SIZE + drvinfo_len)); + stats.length = (u16) GET_RX_DESC_PKT_LEN(rxdesc); + stats.rx_drvinfo_size = (u8)GET_RX_DESC_DRVINFO_SIZE(rxdesc) * + RX_DRV_INFO_SIZE_UNIT; + stats.rx_bufshift = (u8) (GET_RX_DESC_SHIFT(rxdesc) & 0x03); + stats.icv = (u16) GET_RX_DESC_ICV(rxdesc); + stats.crc = (u16) GET_RX_DESC_CRC32(rxdesc); + stats.hwerror = (stats.crc | stats.icv); + stats.decrypted = !GET_RX_DESC_SWDEC(rxdesc); + stats.rate = (u8) GET_RX_DESC_RX_MCS(rxdesc); + stats.shortpreamble = (u16) GET_RX_DESC_SPLCP(rxdesc); + stats.isampdu = (bool) ((GET_RX_DESC_PAGGR(rxdesc) == 1) + && (GET_RX_DESC_FAGGR(rxdesc) == 1)); + stats.timestamp_low = GET_RX_DESC_TSFL(rxdesc); + stats.rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(rxdesc); + /* TODO: is center_freq changed when doing scan? */ + /* TODO: Shall we add protection or just skip those two step? */ + rx_status->freq = hw->conf.channel->center_freq; + rx_status->band = hw->conf.channel->band; + if (GET_RX_DESC_CRC32(rxdesc)) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (!GET_RX_DESC_SWDEC(rxdesc)) + rx_status->flag |= RX_FLAG_DECRYPTED; + if (GET_RX_DESC_BW(rxdesc)) + rx_status->flag |= RX_FLAG_40MHZ; + if (GET_RX_DESC_RX_HT(rxdesc)) + rx_status->flag |= RX_FLAG_HT; + /* Data rate */ + rx_status->rate_idx = _rtl92c_rate_mapping(hw, + (bool)GET_RX_DESC_RX_HT(rxdesc), + (u8)GET_RX_DESC_RX_MCS(rxdesc), + (bool)GET_RX_DESC_PAGGR(rxdesc) + ); + /* There is a phy status after this rx descriptor. 
*/ + if (GET_RX_DESC_PHY_STATUS(rxdesc)) { + p_drvinfo = (struct rx_fwinfo_92c *)(rxdesc + RTL_RX_DESC_SIZE); + rtl92c_translate_rx_signal_stuff(hw, skb, &stats, + (struct rx_desc_92c *)rxdesc, p_drvinfo); + } + skb_pull(skb, (drvinfo_len + RTL_RX_DESC_SIZE)); + hdr = (struct ieee80211_hdr *)(skb->data); + fc = hdr->frame_control; + bv = ieee80211_is_probe_resp(fc); + if (bv) + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Got probe response frame.\n")); + if (ieee80211_is_beacon(fc)) + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Got beacon frame.\n")); + if (ieee80211_is_data(fc)) + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Got data frame.\n")); + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:" + "0x%02X\n", fc, (u32)hdr->addr1[0], (u32)hdr->addr1[1], + (u32)hdr->addr1[2], (u32)hdr->addr1[3], (u32)hdr->addr1[4], + (u32)hdr->addr1[5])); + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + ieee80211_rx_irqsafe(hw, skb); +} + +void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb) +{ + _rtl_rx_process(hw, skb); +} + +void rtl8192c_rx_segregate_hdl( + struct ieee80211_hw *hw, + struct sk_buff *skb, + struct sk_buff_head *skb_list) +{ +} + +/*---------------------------------------------------------------------- + * + * Tx handler + * + *---------------------------------------------------------------------- */ +void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb) +{ +} + +int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb, + struct sk_buff *skb) +{ + return 0; +} + +struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *hw, + struct sk_buff_head *list) +{ + return skb_dequeue(list); +} + +/*======================================== trx ===============================*/ + +static void _rtl_fill_usb_tx_desc(u8 *txdesc) +{ + SET_TX_DESC_OWN(txdesc, 1); + SET_TX_DESC_LAST_SEG(txdesc, 1); + SET_TX_DESC_FIRST_SEG(txdesc, 1); +} +/** + * For HW recovery information + */ +static void _rtl_tx_desc_checksum(u8 *txdesc) +{ + u16 *ptr = (u16 *)txdesc; + u16 checksum = 0; + u32 index; + + /* Clear first */ + SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0); + for (index = 0; index < 16; index++) + checksum = checksum ^ (*(ptr + index)); + SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum); +} + +void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + struct ieee80211_tx_info *info, struct sk_buff *skb, + unsigned int queue_index) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool defaultadapter = true; + struct ieee80211_sta *sta; + struct rtl_tcb_desc tcb_desc; + u8 *qc = ieee80211_get_qos_ctl(hdr); + u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + u16 seq_number; + __le16 fc = hdr->frame_control; + u8 rate_flag = info->control.rates[0].flags; + u16 pktlen = skb->len; + enum rtl_desc_qsel fw_qsel = _rtl8192cu_mq_to_descq(hw, fc, + skb_get_queue_mapping(skb)); + u8 *txdesc; + + seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + rtl_get_tcb_desc(hw, info, skb, &tcb_desc); + txdesc = (u8 *)skb_push(skb, RTL_TX_HEADER_SIZE); + memset(txdesc, 0, RTL_TX_HEADER_SIZE); + SET_TX_DESC_PKT_SIZE(txdesc, pktlen); + SET_TX_DESC_LINIP(txdesc, 0); + SET_TX_DESC_PKT_OFFSET(txdesc, RTL_DUMMY_OFFSET); + SET_TX_DESC_OFFSET(txdesc, RTL_TX_HEADER_SIZE); + SET_TX_DESC_TX_RATE(txdesc, tcb_desc.hw_rate); + if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble) + 
SET_TX_DESC_DATA_SHORTGI(txdesc, 1); + if (mac->tids[tid].agg.agg_state == RTL_AGG_ON && + info->flags & IEEE80211_TX_CTL_AMPDU) { + SET_TX_DESC_AGG_ENABLE(txdesc, 1); + SET_TX_DESC_MAX_AGG_NUM(txdesc, 0x14); + } else { + SET_TX_DESC_AGG_BREAK(txdesc, 1); + } + SET_TX_DESC_SEQ(txdesc, seq_number); + SET_TX_DESC_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable && + !tcb_desc.cts_enable) ? 1 : 0)); + SET_TX_DESC_HW_RTS_ENABLE(txdesc, ((tcb_desc.rts_enable || + tcb_desc.cts_enable) ? 1 : 0)); + SET_TX_DESC_CTS2SELF(txdesc, ((tcb_desc.cts_enable) ? 1 : 0)); + SET_TX_DESC_RTS_STBC(txdesc, ((tcb_desc.rts_stbc) ? 1 : 0)); + SET_TX_DESC_RTS_RATE(txdesc, tcb_desc.rts_rate); + SET_TX_DESC_RTS_BW(txdesc, 0); + SET_TX_DESC_RTS_SC(txdesc, tcb_desc.rts_sc); + SET_TX_DESC_RTS_SHORT(txdesc, + ((tcb_desc.rts_rate <= DESC92C_RATE54M) ? + (tcb_desc.rts_use_shortpreamble ? 1 : 0) + : (tcb_desc.rts_use_shortgi ? 1 : 0))); + if (mac->bw_40) { + if (tcb_desc.packet_bw) { + SET_TX_DESC_DATA_BW(txdesc, 1); + SET_TX_DESC_DATA_SC(txdesc, 3); + } else { + SET_TX_DESC_DATA_BW(txdesc, 0); + if (rate_flag & IEEE80211_TX_RC_DUP_DATA) + SET_TX_DESC_DATA_SC(txdesc, + mac->cur_40_prime_sc); + } + } else { + SET_TX_DESC_DATA_BW(txdesc, 0); + SET_TX_DESC_DATA_SC(txdesc, 0); + } + rcu_read_lock(); + sta = ieee80211_find_sta(mac->vif, mac->bssid); + if (sta) { + u8 ampdu_density = sta->ht_cap.ampdu_density; + SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density); + } + rcu_read_unlock(); + if (info->control.hw_key) { + struct ieee80211_key_conf *keyconf = info->control.hw_key; + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + SET_TX_DESC_SEC_TYPE(txdesc, 0x1); + break; + case WLAN_CIPHER_SUITE_CCMP: + SET_TX_DESC_SEC_TYPE(txdesc, 0x3); + break; + default: + SET_TX_DESC_SEC_TYPE(txdesc, 0x0); + break; + } + } + SET_TX_DESC_PKT_ID(txdesc, 0); + SET_TX_DESC_QUEUE_SEL(txdesc, fw_qsel); + SET_TX_DESC_DATA_RATE_FB_LIMIT(txdesc, 0x1F); + SET_TX_DESC_RTS_RATE_FB_LIMIT(txdesc, 0xF); + SET_TX_DESC_DISABLE_FB(txdesc, 0); + SET_TX_DESC_USE_RATE(txdesc, tcb_desc.use_driver_rate ? 
1 : 0); + if (ieee80211_is_data_qos(fc)) { + if (mac->rdg_en) { + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, + ("Enable RDG function.\n")); + SET_TX_DESC_RDG_ENABLE(txdesc, 1); + SET_TX_DESC_HTC(txdesc, 1); + } + } + if (rtlpriv->dm.useramask) { + SET_TX_DESC_RATE_ID(txdesc, tcb_desc.ratr_index); + SET_TX_DESC_MACID(txdesc, tcb_desc.mac_id); + } else { + SET_TX_DESC_RATE_ID(txdesc, 0xC + tcb_desc.ratr_index); + SET_TX_DESC_MACID(txdesc, tcb_desc.ratr_index); + } + if ((!ieee80211_is_data_qos(fc)) && ppsc->leisure_ps && + ppsc->fwctrl_lps) { + SET_TX_DESC_HWSEQ_EN(txdesc, 1); + SET_TX_DESC_PKT_ID(txdesc, 8); + if (!defaultadapter) + SET_TX_DESC_QOS(txdesc, 1); + } + if (ieee80211_has_morefrags(fc)) + SET_TX_DESC_MORE_FRAG(txdesc, 1); + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + SET_TX_DESC_BMC(txdesc, 1); + _rtl_fill_usb_tx_desc(txdesc); + _rtl_tx_desc_checksum(txdesc); + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, (" %s ==>\n", __func__)); +} + +void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, + u32 buffer_len, bool bIsPsPoll) +{ + /* Clear all status */ + memset(pDesc, 0, RTL_TX_HEADER_SIZE); + SET_TX_DESC_FIRST_SEG(pDesc, 1); /* bFirstSeg; */ + SET_TX_DESC_LAST_SEG(pDesc, 1); /* bLastSeg; */ + SET_TX_DESC_OFFSET(pDesc, RTL_TX_HEADER_SIZE); /* Offset = 32 */ + SET_TX_DESC_PKT_SIZE(pDesc, buffer_len); /* Buffer size + command hdr */ + SET_TX_DESC_QUEUE_SEL(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */ + /* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error + * vlaue by Hw. */ + if (bIsPsPoll) { + SET_TX_DESC_NAV_USE_HDR(pDesc, 1); + } else { + SET_TX_DESC_HWSEQ_EN(pDesc, 1); /* Hw set sequence number */ + SET_TX_DESC_PKT_ID(pDesc, 0x100); /* set bit3 to 1. 
*/ + } + SET_TX_DESC_USE_RATE(pDesc, 1); /* use data rate which is set by Sw */ + SET_TX_DESC_OWN(pDesc, 1); + SET_TX_DESC_TX_RATE(pDesc, DESC92C_RATE1M); + _rtl_tx_desc_checksum(pDesc); +} + +void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, + u8 *pdesc, bool firstseg, + bool lastseg, struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 fw_queue = QSLT_BEACON; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); + __le16 fc = hdr->frame_control; + + memset((void *)pdesc, 0, RTL_TX_HEADER_SIZE); + if (firstseg) + SET_TX_DESC_OFFSET(pdesc, RTL_TX_HEADER_SIZE); + SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M); + SET_TX_DESC_SEQ(pdesc, 0); + SET_TX_DESC_LINIP(pdesc, 0); + SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue); + SET_TX_DESC_FIRST_SEG(pdesc, 1); + SET_TX_DESC_LAST_SEG(pdesc, 1); + SET_TX_DESC_RATE_ID(pdesc, 7); + SET_TX_DESC_MACID(pdesc, 0); + SET_TX_DESC_OWN(pdesc, 1); + SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len)); + SET_TX_DESC_FIRST_SEG(pdesc, 1); + SET_TX_DESC_LAST_SEG(pdesc, 1); + SET_TX_DESC_OFFSET(pdesc, 0x20); + SET_TX_DESC_USE_RATE(pdesc, 1); + if (!ieee80211_is_data_qos(fc)) { + SET_TX_DESC_HWSEQ_EN(pdesc, 1); + SET_TX_DESC_PKT_ID(pdesc, 8); + } + RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content\n", + pdesc, RTL_TX_DESC_SIZE); +} + +bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + return true; +} diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h new file mode 100644 index 000000000000..b396d46edbb7 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.h @@ -0,0 +1,430 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2010 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + * Larry Finger <Larry.Finger@lwfinger.net> + * + *****************************************************************************/ + +#ifndef __RTL92CU_TRX_H__ +#define __RTL92CU_TRX_H__ + +#define RTL92C_USB_BULK_IN_NUM 1 +#define RTL92C_NUM_RX_URBS 8 +#define RTL92C_NUM_TX_URBS 32 + +#define RTL92C_SIZE_MAX_RX_BUFFER 15360 /* 8192 */ +#define RX_DRV_INFO_SIZE_UNIT 8 + +enum usb_rx_agg_mode { + USB_RX_AGG_DISABLE, + USB_RX_AGG_DMA, + USB_RX_AGG_USB, + USB_RX_AGG_DMA_USB +}; + +#define TX_SELE_HQ BIT(0) /* High Queue */ +#define TX_SELE_LQ BIT(1) /* Low Queue */ +#define TX_SELE_NQ BIT(2) /* Normal Queue */ + +#define RTL_USB_TX_AGG_NUM_DESC 5 + +#define RTL_USB_RX_AGG_PAGE_NUM 4 +#define RTL_USB_RX_AGG_PAGE_TIMEOUT 3 + +#define RTL_USB_RX_AGG_BLOCK_NUM 5 +#define RTL_USB_RX_AGG_BLOCK_TIMEOUT 3 + +/*======================== rx status =========================================*/ + +struct rx_drv_info_92c { + /* + * Driver info contain PHY status and other variabel size info + * PHY Status content as below + */ + + /* DWORD 0 */ + u8 gain_trsw[4]; + + /* DWORD 1 */ + u8 pwdb_all; + u8 cfosho[4]; + + /* DWORD 2 */ + u8 cfotail[4]; + + /* DWORD 3 */ + s8 rxevm[2]; + s8 rxsnr[4]; + + /* DWORD 4 */ + u8 pdsnr[2]; + + /* DWORD 5 */ + u8 csi_current[2]; + u8 csi_target[2]; + + /* DWORD 6 */ + u8 sigevm; + u8 max_ex_pwr; + u8 ex_intf_flag:1; + u8 sgi_en:1; + u8 rxsc:2; + u8 reserve:4; +} __packed; + +/* Define a macro that takes a le32 word, converts it to host ordering, + * right shifts by a specified count, creates a mask of the specified + * bit count, and extracts that number of bits. + */ + +#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits) \ + ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \ + BIT_LEN_MASK_32(__bits)) + +/* Define a macro that clears a bit field in an le32 word and + * sets the specified value into that bit field. The resulting + * value remains in le32 ordering; however, it is properly converted + * to host ordering for the clear and set operations before conversion + * back to le32. 
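To see what SHIFT_AND_MASK_LE does with a descriptor that arrives from the device in little-endian order, here is a small self-contained illustration; the macro is restated locally so the snippet stands on its own, and the field values are invented examples rather than real hardware output:

#include <linux/types.h>
#include <linux/kernel.h>

/* Restated for illustration only; the real definition is in trx.h and
 * uses BIT_LEN_MASK_32() from wifi.h instead of the open-coded mask. */
#define DEMO_SHIFT_AND_MASK_LE(__pdesc, __shift, __bits)           \
        ((le32_to_cpu(*((__le32 *)(__pdesc))) >> (__shift)) &      \
         ((1UL << (__bits)) - 1))

static void demo_rx_desc_parse(void)
{
        __le32 rxdesc[6];

        /* Pretend DWORD 0 carries pkt_len = 1500 in bits 0..13 and the
         * PHY-status flag in bit 26. */
        rxdesc[0] = cpu_to_le32((1 << 26) | 1500);

        pr_info("len=%lu phy_status=%lu\n",
                DEMO_SHIFT_AND_MASK_LE(rxdesc, 0, 14),   /* -> 1500 */
                DEMO_SHIFT_AND_MASK_LE(rxdesc, 26, 1));  /* -> 1 */
}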
+ */ + +#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val) \ + (*(__le32 *)(__pdesc) = \ + (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) & \ + (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) | \ + (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift))))); + +/* macros to read various fields in RX descriptor */ + +/* DWORD 0 */ +#define GET_RX_DESC_PKT_LEN(__rxdesc) \ + SHIFT_AND_MASK_LE((__rxdesc), 0, 14) +#define GET_RX_DESC_CRC32(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 14, 1) +#define GET_RX_DESC_ICV(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 15, 1) +#define GET_RX_DESC_DRVINFO_SIZE(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 16, 4) +#define GET_RX_DESC_SECURITY(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 20, 3) +#define GET_RX_DESC_QOS(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 23, 1) +#define GET_RX_DESC_SHIFT(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 24, 2) +#define GET_RX_DESC_PHY_STATUS(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 26, 1) +#define GET_RX_DESC_SWDEC(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 27, 1) +#define GET_RX_DESC_LAST_SEG(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 28, 1) +#define GET_RX_DESC_FIRST_SEG(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 29, 1) +#define GET_RX_DESC_EOR(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 30, 1) +#define GET_RX_DESC_OWN(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc, 31, 1) + +/* DWORD 1 */ +#define GET_RX_DESC_MACID(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5) +#define GET_RX_DESC_TID(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4) +#define GET_RX_DESC_PAGGR(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1) +#define GET_RX_DESC_FAGGR(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1) +#define GET_RX_DESC_A1_FIT(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4) +#define GET_RX_DESC_A2_FIT(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4) +#define GET_RX_DESC_PAM(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1) +#define GET_RX_DESC_PWR(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1) +#define GET_RX_DESC_MORE_DATA(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1) +#define GET_RX_DESC_MORE_FRAG(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1) +#define GET_RX_DESC_TYPE(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2) +#define GET_RX_DESC_MC(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1) +#define GET_RX_DESC_BC(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1) + +/* DWORD 2 */ +#define GET_RX_DESC_SEQ(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12) +#define GET_RX_DESC_FRAG(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4) +#define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8) +#define GET_RX_DESC_NEXT_IND(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1) + +/* DWORD 3 */ +#define GET_RX_DESC_RX_MCS(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6) +#define GET_RX_DESC_RX_HT(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1) +#define GET_RX_DESC_AMSDU(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1) +#define GET_RX_DESC_SPLCP(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1) +#define GET_RX_DESC_BW(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1) +#define GET_RX_DESC_HTC(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1) +#define GET_RX_DESC_TCP_CHK_RPT(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1) +#define GET_RX_DESC_IP_CHK_RPT(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1) +#define GET_RX_DESC_TCP_CHK_VALID(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1) +#define GET_RX_DESC_HWPC_ERR(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1) +#define 
GET_RX_DESC_HWPC_IND(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1) +#define GET_RX_DESC_IV0(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16) + +/* DWORD 4 */ +#define GET_RX_DESC_IV1(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32) + +/* DWORD 5 */ +#define GET_RX_DESC_TSFL(__rxdesc) \ + SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32) + +/*======================= tx desc ============================================*/ + +/* macros to set various fields in TX descriptor */ + +/* Dword 0 */ +#define SET_TX_DESC_PKT_SIZE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value) +#define SET_TX_DESC_OFFSET(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value) +#define SET_TX_DESC_BMC(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value) +#define SET_TX_DESC_HTC(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value) +#define SET_TX_DESC_LAST_SEG(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value) +#define SET_TX_DESC_FIRST_SEG(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value) +#define SET_TX_DESC_LINIP(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value) +#define SET_TX_DESC_NO_ACM(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value) +#define SET_TX_DESC_GF(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value) +#define SET_TX_DESC_OWN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value) + + +/* Dword 1 */ +#define SET_TX_DESC_MACID(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value) +#define SET_TX_DESC_AGG_ENABLE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value) +#define SET_TX_DESC_AGG_BREAK(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value) +#define SET_TX_DESC_RDG_ENABLE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value) +#define SET_TX_DESC_QUEUE_SEL(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value) +#define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value) +#define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value) +#define SET_TX_DESC_PIFS(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value) +#define SET_TX_DESC_RATE_ID(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value) +#define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value) +#define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value) +#define SET_TX_DESC_EN_DESC_ID(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value) +#define SET_TX_DESC_SEC_TYPE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value) +#define SET_TX_DESC_PKT_OFFSET(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value) + +/* Dword 2 */ +#define SET_TX_DESC_RTS_RC(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value) +#define SET_TX_DESC_DATA_RC(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value) +#define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value) +#define SET_TX_DESC_MORE_FRAG(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value) +#define SET_TX_DESC_RAW(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value) +#define SET_TX_DESC_CCX(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value) +#define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value) \ + 
SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value) +#define SET_TX_DESC_ANTSEL_A(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value) +#define SET_TX_DESC_ANTSEL_B(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value) +#define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value) +#define SET_TX_DESC_TX_ANTL(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value) +#define SET_TX_DESC_TX_ANT_HT(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value) + +/* Dword 3 */ +#define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value) +#define SET_TX_DESC_TAIL_PAGE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value) +#define SET_TX_DESC_SEQ(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value) +#define SET_TX_DESC_PKT_ID(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value) + +/* Dword 4 */ +#define SET_TX_DESC_RTS_RATE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value) +#define SET_TX_DESC_AP_DCFE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value) +#define SET_TX_DESC_QOS(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value) +#define SET_TX_DESC_HWSEQ_EN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value) +#define SET_TX_DESC_USE_RATE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value) +#define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value) +#define SET_TX_DESC_DISABLE_FB(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value) +#define SET_TX_DESC_CTS2SELF(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value) +#define SET_TX_DESC_RTS_ENABLE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value) +#define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value) +#define SET_TX_DESC_WAIT_DCTS(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value) +#define SET_TX_DESC_CTS2AP_EN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value) +#define SET_TX_DESC_DATA_SC(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value) +#define SET_TX_DESC_DATA_STBC(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value) +#define SET_TX_DESC_DATA_SHORT(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value) +#define SET_TX_DESC_DATA_BW(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value) +#define SET_TX_DESC_RTS_SHORT(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value) +#define SET_TX_DESC_RTS_BW(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value) +#define SET_TX_DESC_RTS_SC(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value) +#define SET_TX_DESC_RTS_STBC(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value) + +/* Dword 5 */ +#define SET_TX_DESC_TX_RATE(__pdesc, __val) \ + SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val) +#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \ + SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val) +#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \ + SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val) +#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value) +#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value) +#define 
SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value) +#define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value) +#define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value) + +/* Dword 6 */ +#define SET_TX_DESC_TXAGC_A(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value) +#define SET_TX_DESC_TXAGC_B(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value) +#define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value) +#define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value) +#define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value) +#define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value) +#define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value) +#define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value) + +/* Dword 7 */ +#define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value) +#define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value) +#define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value) +#define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value) +#define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \ + SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value) + + +int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw); +u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index); +bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *stats, + struct ieee80211_rx_status *rx_status, + u8 *p_desc, struct sk_buff *skb); +void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb); +void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *, + struct sk_buff_head *); +void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb); +int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb, + struct sk_buff *skb); +struct sk_buff *rtl8192c_tx_aggregate_hdl(struct ieee80211_hw *, + struct sk_buff_head *); +void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + struct ieee80211_tx_info *info, struct sk_buff *skb, + unsigned int queue_index); +void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, + u32 buffer_len, bool bIsPsPoll); +void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, + u8 *pdesc, bool b_firstseg, + bool b_lastseg, struct sk_buff *skb); +bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); + +#endif diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c new file mode 100644 index 000000000000..a4b2613d6a8c --- /dev/null +++ b/drivers/net/wireless/rtlwifi/usb.c @@ -0,0 +1,1035 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. + * + *****************************************************************************/ +#include <linux/usb.h> +#include "core.h" +#include "wifi.h" +#include "usb.h" +#include "base.h" +#include "ps.h" + +#define REALTEK_USB_VENQT_READ 0xC0 +#define REALTEK_USB_VENQT_WRITE 0x40 +#define REALTEK_USB_VENQT_CMD_REQ 0x05 +#define REALTEK_USB_VENQT_CMD_IDX 0x00 + +#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254 + +static void usbctrl_async_callback(struct urb *urb) +{ + if (urb) + kfree(urb->context); +} + +static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request, + u16 value, u16 index, void *pdata, + u16 len) +{ + int rc; + unsigned int pipe; + u8 reqtype; + struct usb_ctrlrequest *dr; + struct urb *urb; + struct rtl819x_async_write_data { + u8 data[REALTEK_USB_VENQT_MAX_BUF_SIZE]; + struct usb_ctrlrequest dr; + } *buf; + + pipe = usb_sndctrlpipe(udev, 0); /* write_out */ + reqtype = REALTEK_USB_VENQT_WRITE; + + buf = kmalloc(sizeof(*buf), GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + kfree(buf); + return -ENOMEM; + } + + dr = &buf->dr; + + dr->bRequestType = reqtype; + dr->bRequest = request; + dr->wValue = cpu_to_le16(value); + dr->wIndex = cpu_to_le16(index); + dr->wLength = cpu_to_le16(len); + memcpy(buf, pdata, len); + usb_fill_control_urb(urb, udev, pipe, + (unsigned char *)dr, buf, len, + usbctrl_async_callback, buf); + rc = usb_submit_urb(urb, GFP_ATOMIC); + if (rc < 0) + kfree(buf); + usb_free_urb(urb); + return rc; +} + +static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request, + u16 value, u16 index, void *pdata, + u16 len) +{ + unsigned int pipe; + int status; + u8 reqtype; + + pipe = usb_rcvctrlpipe(udev, 0); /* read_in */ + reqtype = REALTEK_USB_VENQT_READ; + + status = usb_control_msg(udev, pipe, request, reqtype, value, index, + pdata, len, 0); /* max. timeout */ + + if (status < 0) + printk(KERN_ERR "reg 0x%x, usbctrl_vendorreq TimeOut! 
" + "status:0x%x value=0x%x\n", value, status, + *(u32 *)pdata); + return status; +} + +static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len) +{ + u8 request; + u16 wvalue; + u16 index; + u32 *data; + u32 ret; + + data = kmalloc(sizeof(u32), GFP_KERNEL); + if (!data) + return -ENOMEM; + request = REALTEK_USB_VENQT_CMD_REQ; + index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ + + wvalue = (u16)addr; + _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len); + ret = *data; + kfree(data); + return ret; +} + +static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr) +{ + struct device *dev = rtlpriv->io.dev; + + return (u8)_usb_read_sync(to_usb_device(dev), addr, 1); +} + +static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr) +{ + struct device *dev = rtlpriv->io.dev; + + return (u16)_usb_read_sync(to_usb_device(dev), addr, 2); +} + +static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr) +{ + struct device *dev = rtlpriv->io.dev; + + return _usb_read_sync(to_usb_device(dev), addr, 4); +} + +static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val, + u16 len) +{ + u8 request; + u16 wvalue; + u16 index; + u32 data; + + request = REALTEK_USB_VENQT_CMD_REQ; + index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ + wvalue = (u16)(addr&0x0000ffff); + data = val; + _usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data, + len); +} + +static void _usb_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val) +{ + struct device *dev = rtlpriv->io.dev; + + _usb_write_async(to_usb_device(dev), addr, val, 1); +} + +static void _usb_write16_async(struct rtl_priv *rtlpriv, u32 addr, u16 val) +{ + struct device *dev = rtlpriv->io.dev; + + _usb_write_async(to_usb_device(dev), addr, val, 2); +} + +static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val) +{ + struct device *dev = rtlpriv->io.dev; + + _usb_write_async(to_usb_device(dev), addr, val, 4); +} + +static int _usb_nbytes_read_write(struct usb_device *udev, bool read, u32 addr, + u16 len, u8 *pdata) +{ + int status; + u8 request; + u16 wvalue; + u16 index; + + request = REALTEK_USB_VENQT_CMD_REQ; + index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */ + wvalue = (u16)addr; + if (read) + status = _usbctrl_vendorreq_sync_read(udev, request, wvalue, + index, pdata, len); + else + status = _usbctrl_vendorreq_async_write(udev, request, wvalue, + index, pdata, len); + return status; +} + +static int _usb_readN_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len, + u8 *pdata) +{ + struct device *dev = rtlpriv->io.dev; + + return _usb_nbytes_read_write(to_usb_device(dev), true, addr, len, + pdata); +} + +static int _usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, u16 len, + u8 *pdata) +{ + struct device *dev = rtlpriv->io.dev; + + return _usb_nbytes_read_write(to_usb_device(dev), false, addr, len, + pdata); +} + +static void _rtl_usb_io_handler_init(struct device *dev, + struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->io.dev = dev; + mutex_init(&rtlpriv->io.bb_mutex); + rtlpriv->io.write8_async = _usb_write8_async; + rtlpriv->io.write16_async = _usb_write16_async; + rtlpriv->io.write32_async = _usb_write32_async; + rtlpriv->io.writeN_async = _usb_writeN_async; + rtlpriv->io.read8_sync = _usb_read8_sync; + rtlpriv->io.read16_sync = _usb_read16_sync; + rtlpriv->io.read32_sync = _usb_read32_sync; + rtlpriv->io.readN_sync = _usb_readN_sync; +} + +static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); 
+ + mutex_destroy(&rtlpriv->io.bb_mutex); +} + +/** + * + * Default aggregation handler. Do nothing and just return the oldest skb. + */ +static struct sk_buff *_none_usb_tx_aggregate_hdl(struct ieee80211_hw *hw, + struct sk_buff_head *list) +{ + return skb_dequeue(list); +} + +#define IS_HIGH_SPEED_USB(udev) \ + ((USB_SPEED_HIGH == (udev)->speed) ? true : false) + +static int _rtl_usb_init_tx(struct ieee80211_hw *hw) +{ + u32 i; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + rtlusb->max_bulk_out_size = IS_HIGH_SPEED_USB(rtlusb->udev) + ? USB_HIGH_SPEED_BULK_SIZE + : USB_FULL_SPEED_BULK_SIZE; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("USB Max Bulk-out Size=%d\n", + rtlusb->max_bulk_out_size)); + + for (i = 0; i < __RTL_TXQ_NUM; i++) { + u32 ep_num = rtlusb->ep_map.ep_mapping[i]; + if (!ep_num) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("Invalid endpoint map setting!\n")); + return -EINVAL; + } + } + + rtlusb->usb_tx_post_hdl = + rtlpriv->cfg->usb_interface_cfg->usb_tx_post_hdl; + rtlusb->usb_tx_cleanup = + rtlpriv->cfg->usb_interface_cfg->usb_tx_cleanup; + rtlusb->usb_tx_aggregate_hdl = + (rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl) + ? rtlpriv->cfg->usb_interface_cfg->usb_tx_aggregate_hdl + : &_none_usb_tx_aggregate_hdl; + + init_usb_anchor(&rtlusb->tx_submitted); + for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { + skb_queue_head_init(&rtlusb->tx_skb_queue[i]); + init_usb_anchor(&rtlusb->tx_pending[i]); + } + return 0; +} + +static int _rtl_usb_init_rx(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); + + rtlusb->rx_max_size = rtlpriv->cfg->usb_interface_cfg->rx_max_size; + rtlusb->rx_urb_num = rtlpriv->cfg->usb_interface_cfg->rx_urb_num; + rtlusb->in_ep = rtlpriv->cfg->usb_interface_cfg->in_ep_num; + rtlusb->usb_rx_hdl = rtlpriv->cfg->usb_interface_cfg->usb_rx_hdl; + rtlusb->usb_rx_segregate_hdl = + rtlpriv->cfg->usb_interface_cfg->usb_rx_segregate_hdl; + + printk(KERN_INFO "rtl8192cu: rx_max_size %d, rx_urb_num %d, in_ep %d\n", + rtlusb->rx_max_size, rtlusb->rx_urb_num, rtlusb->in_ep); + init_usb_anchor(&rtlusb->rx_submitted); + return 0; +} + +static int _rtl_usb_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); + int err; + u8 epidx; + struct usb_interface *usb_intf = rtlusb->intf; + u8 epnums = usb_intf->cur_altsetting->desc.bNumEndpoints; + + rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0; + for (epidx = 0; epidx < epnums; epidx++) { + struct usb_endpoint_descriptor *pep_desc; + pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc; + + if (usb_endpoint_dir_in(pep_desc)) + rtlusb->in_ep_nums++; + else if (usb_endpoint_dir_out(pep_desc)) + rtlusb->out_ep_nums++; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, + ("USB EP(0x%02x), MaxPacketSize=%d ,Interval=%d.\n", + pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize, + pep_desc->bInterval)); + } + if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) + return -EINVAL ; + + /* usb endpoint mapping */ + err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw); + rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; + _rtl_usb_init_tx(hw); + _rtl_usb_init_rx(hw); + return err; +} + +static int _rtl_usb_init_sw(struct ieee80211_hw *hw) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + 
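The function-pointer table filled in by _rtl_usb_io_handler_init() above is the only register-access interface the chip-specific code sees, so the same HAL code can run over PCI memory-mapped I/O or USB vendor requests. A rough sketch of the thin wrappers the rtlwifi core layers on top of that table (the demo_ names are illustrative; the real rtl_read_byte()/rtl_write_byte() inlines live in wifi.h):

/* Illustrative wrappers: dispatch through whatever accessors the bus
 * glue installed in rtlpriv->io. */
static inline u8 demo_read_byte(struct rtl_priv *rtlpriv, u32 addr)
{
        return rtlpriv->io.read8_sync(rtlpriv, addr);   /* blocking read */
}

static inline void demo_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val)
{
        rtlpriv->io.write8_async(rtlpriv, addr, val);   /* fire and forget */
}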
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + rtlhal->hw = hw; + ppsc->inactiveps = false; + ppsc->leisure_ps = false; + ppsc->fwctrl_lps = false; + ppsc->reg_fwctrl_lps = 3; + ppsc->reg_max_lps_awakeintvl = 5; + ppsc->fwctrl_psmode = FW_PS_DTIM_MODE; + + /* IBSS */ + mac->beacon_interval = 100; + + /* AMPDU */ + mac->min_space_cfg = 0; + mac->max_mss_density = 0; + + /* set sane AMPDU defaults */ + mac->current_ampdu_density = 7; + mac->current_ampdu_factor = 3; + + /* QOS */ + rtlusb->acm_method = eAcmWay2_SW; + + /* IRQ */ + /* HIMR - turn all on */ + rtlusb->irq_mask[0] = 0xFFFFFFFF; + /* HIMR_EX - turn all on */ + rtlusb->irq_mask[1] = 0xFFFFFFFF; + rtlusb->disableHWSM = true; + return 0; +} + +#define __RADIO_TAP_SIZE_RSV 32 + +static void _rtl_rx_completed(struct urb *urb); + +static struct sk_buff *_rtl_prep_rx_urb(struct ieee80211_hw *hw, + struct rtl_usb *rtlusb, + struct urb *urb, + gfp_t gfp_mask) +{ + struct sk_buff *skb; + struct rtl_priv *rtlpriv = rtl_priv(hw); + + skb = __dev_alloc_skb((rtlusb->rx_max_size + __RADIO_TAP_SIZE_RSV), + gfp_mask); + if (!skb) { + RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, + ("Failed to __dev_alloc_skb!!\n")) + return ERR_PTR(-ENOMEM); + } + + /* reserve some space for mac80211's radiotap */ + skb_reserve(skb, __RADIO_TAP_SIZE_RSV); + usb_fill_bulk_urb(urb, rtlusb->udev, + usb_rcvbulkpipe(rtlusb->udev, rtlusb->in_ep), + skb->data, min(skb_tailroom(skb), + (int)rtlusb->rx_max_size), + _rtl_rx_completed, skb); + + _rtl_install_trx_info(rtlusb, skb, rtlusb->in_ep); + return skb; +} + +#undef __RADIO_TAP_SIZE_RSV + +static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *rxdesc = skb->data; + struct ieee80211_hdr *hdr; + bool unicast = false; + __le16 fc; + struct ieee80211_rx_status rx_status = {0}; + struct rtl_stats stats = { + .signal = 0, + .noise = -98, + .rate = 0, + }; + + skb_pull(skb, RTL_RX_DESC_SIZE); + rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb); + skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift)); + hdr = (struct ieee80211_hdr *)(skb->data); + fc = hdr->frame_control; + if (!stats.crc) { + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + + if (is_broadcast_ether_addr(hdr->addr1)) { + /*TODO*/; + } else if (is_multicast_ether_addr(hdr->addr1)) { + /*TODO*/ + } else { + unicast = true; + rtlpriv->stats.rxbytesunicast += skb->len; + } + + rtl_is_special_data(hw, skb, false); + + if (ieee80211_is_data(fc)) { + rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); + + if (unicast) + rtlpriv->link_info.num_rx_inperiod++; + } + } +} + +static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 *rxdesc = skb->data; + struct ieee80211_hdr *hdr; + bool unicast = false; + __le16 fc; + struct ieee80211_rx_status rx_status = {0}; + struct rtl_stats stats = { + .signal = 0, + .noise = -98, + .rate = 0, + }; + + skb_pull(skb, RTL_RX_DESC_SIZE); + rtlpriv->cfg->ops->query_rx_desc(hw, &stats, &rx_status, rxdesc, skb); + skb_pull(skb, (stats.rx_drvinfo_size + stats.rx_bufshift)); + hdr = (struct ieee80211_hdr *)(skb->data); + fc = hdr->frame_control; + if (!stats.crc) { + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + + if (is_broadcast_ether_addr(hdr->addr1)) { + /*TODO*/; + } else if (is_multicast_ether_addr(hdr->addr1)) { + 
/*TODO*/ + } else { + unicast = true; + rtlpriv->stats.rxbytesunicast += skb->len; + } + + rtl_is_special_data(hw, skb, false); + + if (ieee80211_is_data(fc)) { + rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX); + + if (unicast) + rtlpriv->link_info.num_rx_inperiod++; + } + if (likely(rtl_action_proc(hw, skb, false))) { + struct sk_buff *uskb = NULL; + u8 *pdata; + + uskb = dev_alloc_skb(skb->len + 128); + memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, + sizeof(rx_status)); + pdata = (u8 *)skb_put(uskb, skb->len); + memcpy(pdata, skb->data, skb->len); + dev_kfree_skb_any(skb); + ieee80211_rx_irqsafe(hw, uskb); + } else { + dev_kfree_skb_any(skb); + } + } +} + +static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct sk_buff *_skb; + struct sk_buff_head rx_queue; + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + skb_queue_head_init(&rx_queue); + if (rtlusb->usb_rx_segregate_hdl) + rtlusb->usb_rx_segregate_hdl(hw, skb, &rx_queue); + WARN_ON(skb_queue_empty(&rx_queue)); + while (!skb_queue_empty(&rx_queue)) { + _skb = skb_dequeue(&rx_queue); + _rtl_usb_rx_process_agg(hw, skb); + ieee80211_rx_irqsafe(hw, skb); + } +} + +static void _rtl_rx_completed(struct urb *_urb) +{ + struct sk_buff *skb = (struct sk_buff *)_urb->context; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0]; + struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); + struct rtl_priv *rtlpriv = rtl_priv(hw); + int err = 0; + + if (unlikely(IS_USB_STOP(rtlusb))) + goto free; + + if (likely(0 == _urb->status)) { + /* If this code were moved to work queue, would CPU + * utilization be improved? NOTE: We shall allocate another skb + * and reuse the original one. + */ + skb_put(skb, _urb->actual_length); + + if (likely(!rtlusb->usb_rx_segregate_hdl)) { + struct sk_buff *_skb; + _rtl_usb_rx_process_noagg(hw, skb); + _skb = _rtl_prep_rx_urb(hw, rtlusb, _urb, GFP_ATOMIC); + if (IS_ERR(_skb)) { + err = PTR_ERR(_skb); + RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, + ("Can't allocate skb for bulk IN!\n")); + return; + } + skb = _skb; + } else{ + /* TO DO */ + _rtl_rx_pre_process(hw, skb); + printk(KERN_ERR "rtlwifi: rx agg not supported\n"); + } + goto resubmit; + } + + switch (_urb->status) { + /* disconnect */ + case -ENOENT: + case -ECONNRESET: + case -ENODEV: + case -ESHUTDOWN: + goto free; + default: + break; + } + +resubmit: + skb_reset_tail_pointer(skb); + skb_trim(skb, 0); + + usb_anchor_urb(_urb, &rtlusb->rx_submitted); + err = usb_submit_urb(_urb, GFP_ATOMIC); + if (unlikely(err)) { + usb_unanchor_urb(_urb); + goto free; + } + return; + +free: + dev_kfree_skb_irq(skb); +} + +static int _rtl_usb_receive(struct ieee80211_hw *hw) +{ + struct urb *urb; + struct sk_buff *skb; + int err; + int i; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + WARN_ON(0 == rtlusb->rx_urb_num); + /* 1600 == 1514 + max WLAN header + rtk info */ + WARN_ON(rtlusb->rx_max_size < 1600); + + for (i = 0; i < rtlusb->rx_urb_num; i++) { + err = -ENOMEM; + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) { + RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, + ("Failed to alloc URB!!\n")) + goto err_out; + } + + skb = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL); + if (IS_ERR(skb)) { + RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, + ("Failed to prep_rx_urb!!\n")) + err = PTR_ERR(skb); + goto err_out; + } + + usb_anchor_urb(urb, &rtlusb->rx_submitted); + err = usb_submit_urb(urb, GFP_KERNEL); + if (err) + goto 
err_out; + usb_free_urb(urb); + } + return 0; + +err_out: + usb_kill_anchored_urbs(&rtlusb->rx_submitted); + return err; +} + +static int rtl_usb_start(struct ieee80211_hw *hw) +{ + int err; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + err = rtlpriv->cfg->ops->hw_init(hw); + rtl_init_rx_config(hw); + + /* Enable software */ + SET_USB_START(rtlusb); + /* should after adapter start and interrupt enable. */ + set_hal_start(rtlhal); + + /* Start bulk IN */ + _rtl_usb_receive(hw); + + return err; +} +/** + * + * + */ + +/*======================= tx =========================================*/ +static void rtl_usb_cleanup(struct ieee80211_hw *hw) +{ + u32 i; + struct sk_buff *_skb; + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct ieee80211_tx_info *txinfo; + + SET_USB_STOP(rtlusb); + + /* clean up rx stuff. */ + usb_kill_anchored_urbs(&rtlusb->rx_submitted); + + /* clean up tx stuff */ + for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { + while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) { + rtlusb->usb_tx_cleanup(hw, _skb); + txinfo = IEEE80211_SKB_CB(_skb); + ieee80211_tx_info_clear_status(txinfo); + txinfo->flags |= IEEE80211_TX_STAT_ACK; + ieee80211_tx_status_irqsafe(hw, _skb); + } + usb_kill_anchored_urbs(&rtlusb->tx_pending[i]); + } + usb_kill_anchored_urbs(&rtlusb->tx_submitted); +} + +/** + * + * We may add some struct into struct rtl_usb later. Do deinit here. + * + */ +static void rtl_usb_deinit(struct ieee80211_hw *hw) +{ + rtl_usb_cleanup(hw); +} + +static void rtl_usb_stop(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + /* should after adapter start and interrupt enable. 
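Both the bulk-IN setup in _rtl_usb_receive() and the TX path below lean on the same USB anchor idiom: every submitted URB is parked on a usb_anchor so that stop or disconnect can cancel everything with one usb_kill_anchored_urbs() call instead of tracking URBs by hand. A stripped-down sketch of that idiom, independent of any rtlwifi specifics (the demo_ names are placeholders):

#include <linux/usb.h>

static int demo_submit_anchored(struct usb_device *udev, unsigned int ep,
                                void *buf, int len, usb_complete_t done,
                                struct usb_anchor *anchor)
{
        struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
        int err;

        if (!urb)
                return -ENOMEM;

        usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep),
                          buf, len, done, NULL);
        usb_anchor_urb(urb, anchor);            /* track it for teardown */
        err = usb_submit_urb(urb, GFP_KERNEL);
        if (err)
                usb_unanchor_urb(urb);          /* never submitted: untrack */
        usb_free_urb(urb);                      /* drop the local reference */
        return err;
}

/* On stop or disconnect a single call reaps whatever is still in flight:
 *        usb_kill_anchored_urbs(anchor);
 */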
*/ + set_hal_stop(rtlhal); + /* Enable software */ + SET_USB_STOP(rtlusb); + rtl_usb_deinit(hw); + rtlpriv->cfg->ops->hw_disable(hw); +} + +static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb) +{ + int err; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + usb_anchor_urb(_urb, &rtlusb->tx_submitted); + err = usb_submit_urb(_urb, GFP_ATOMIC); + if (err < 0) { + struct sk_buff *skb; + + RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, + ("Failed to submit urb.\n")); + usb_unanchor_urb(_urb); + skb = (struct sk_buff *)_urb->context; + kfree_skb(skb); + } + usb_free_urb(_urb); +} + +static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb, + struct sk_buff *skb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct ieee80211_tx_info *txinfo; + + rtlusb->usb_tx_post_hdl(hw, urb, skb); + skb_pull(skb, RTL_TX_HEADER_SIZE); + txinfo = IEEE80211_SKB_CB(skb); + ieee80211_tx_info_clear_status(txinfo); + txinfo->flags |= IEEE80211_TX_STAT_ACK; + + if (urb->status) { + RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, + ("Urb has error status 0x%X\n", urb->status)); + goto out; + } + /* TODO: statistics */ +out: + ieee80211_tx_status_irqsafe(hw, skb); + return urb->status; +} + +static void _rtl_tx_complete(struct urb *urb) +{ + struct sk_buff *skb = (struct sk_buff *)urb->context; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct rtl_usb *rtlusb = (struct rtl_usb *)info->rate_driver_data[0]; + struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf); + int err; + + if (unlikely(IS_USB_STOP(rtlusb))) + return; + err = _usb_tx_post(hw, urb, skb); + if (err) { + /* Ignore error and keep issuiing other urbs */ + return; + } +} + +static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw, + struct sk_buff *skb, u32 ep_num) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct urb *_urb; + + WARN_ON(NULL == skb); + _urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!_urb) { + RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, + ("Can't allocate URB for bulk out!\n")); + kfree_skb(skb); + return NULL; + } + _rtl_install_trx_info(rtlusb, skb, ep_num); + usb_fill_bulk_urb(_urb, rtlusb->udev, usb_sndbulkpipe(rtlusb->udev, + ep_num), skb->data, skb->len, _rtl_tx_complete, skb); + _urb->transfer_flags |= URB_ZERO_PACKET; + return _urb; +} + +static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, + enum rtl_txq qnum) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + u32 ep_num; + struct urb *_urb = NULL; + struct sk_buff *_skb = NULL; + struct sk_buff_head *skb_list; + struct usb_anchor *urb_list; + + WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl); + if (unlikely(IS_USB_STOP(rtlusb))) { + RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG, + ("USB device is stopping...\n")); + kfree_skb(skb); + return; + } + ep_num = rtlusb->ep_map.ep_mapping[qnum]; + skb_list = &rtlusb->tx_skb_queue[ep_num]; + _skb = skb; + _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num); + if (unlikely(!_urb)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Can't allocate urb. 
Drop skb!\n")); + return; + } + urb_list = &rtlusb->tx_pending[ep_num]; + _rtl_submit_tx_urb(hw, _urb); +} + +static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, + u16 hw_queue) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct rtl_tx_desc *pdesc = NULL; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); + __le16 fc = hdr->frame_control; + u8 *pda_addr = hdr->addr1; + /* ssn */ + u8 *qc = NULL; + u8 tid = 0; + u16 seq_number = 0; + + if (ieee80211_is_mgmt(fc)) + rtl_tx_mgmt_proc(hw, skb); + rtl_action_proc(hw, skb, true); + if (is_multicast_ether_addr(pda_addr)) + rtlpriv->stats.txbytesmulticast += skb->len; + else if (is_broadcast_ether_addr(pda_addr)) + rtlpriv->stats.txbytesbroadcast += skb->len; + else + rtlpriv->stats.txbytesunicast += skb->len; + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; + seq_number = (le16_to_cpu(hdr->seq_ctrl) & + IEEE80211_SCTL_SEQ) >> 4; + seq_number += 1; + seq_number <<= 4; + } + rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc, info, skb, + hw_queue); + if (!ieee80211_has_morefrags(hdr->frame_control)) { + if (qc) + mac->tids[tid].seq_number = seq_number; + } + if (ieee80211_is_data(fc)) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX); +} + +static int rtl_usb_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +{ + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data); + __le16 fc = hdr->frame_control; + u16 hw_queue; + + if (unlikely(is_hal_stop(rtlhal))) + goto err_free; + hw_queue = rtlusb->usb_mq_to_hwq(fc, skb_get_queue_mapping(skb)); + _rtl_usb_tx_preprocess(hw, skb, hw_queue); + _rtl_usb_transmit(hw, skb, hw_queue); + return NETDEV_TX_OK; + +err_free: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static bool rtl_usb_tx_chk_waitq_insert(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + return false; +} + +static struct rtl_intf_ops rtl_usb_ops = { + .adapter_start = rtl_usb_start, + .adapter_stop = rtl_usb_stop, + .adapter_tx = rtl_usb_tx, + .waitq_insert = rtl_usb_tx_chk_waitq_insert, +}; + +int __devinit rtl_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + int err; + struct ieee80211_hw *hw = NULL; + struct rtl_priv *rtlpriv = NULL; + struct usb_device *udev; + struct rtl_usb_priv *usb_priv; + + hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) + + sizeof(struct rtl_usb_priv), &rtl_ops); + if (!hw) { + RT_ASSERT(false, ("%s : ieee80211 alloc failed\n", __func__)); + return -ENOMEM; + } + rtlpriv = hw->priv; + SET_IEEE80211_DEV(hw, &intf->dev); + udev = interface_to_usbdev(intf); + usb_get_dev(udev); + usb_priv = rtl_usbpriv(hw); + memset(usb_priv, 0, sizeof(*usb_priv)); + usb_priv->dev.intf = intf; + usb_priv->dev.udev = udev; + usb_set_intfdata(intf, hw); + /* init cfg & intf_ops */ + rtlpriv->rtlhal.interface = INTF_USB; + rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_info); + rtlpriv->intf_ops = &rtl_usb_ops; + rtl_dbgp_flag_init(hw); + /* Init IO handler */ + _rtl_usb_io_handler_init(&udev->dev, hw); + rtlpriv->cfg->ops->read_chip_version(hw); + /*like read eeprom and so on */ + rtlpriv->cfg->ops->read_eeprom_info(hw); + if (rtlpriv->cfg->ops->init_sw_vars(hw)) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Can't init_sw_vars.\n")); + goto error_out; + } + 
rtlpriv->cfg->ops->init_sw_leds(hw); + err = _rtl_usb_init(hw); + err = _rtl_usb_init_sw(hw); + /* Init mac80211 sw */ + err = rtl_init_core(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("Can't allocate sw for mac80211.\n")); + goto error_out; + } + + /*init rfkill */ + /* rtl_init_rfkill(hw); */ + + err = ieee80211_register_hw(hw); + if (err) { + RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, + ("Can't register mac80211 hw.\n")); + goto error_out; + } else { + rtlpriv->mac80211.mac80211_registered = 1; + } + set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); + return 0; +error_out: + rtl_deinit_core(hw); + _rtl_usb_io_handler_release(hw); + ieee80211_free_hw(hw); + usb_put_dev(udev); + return -ENODEV; +} +EXPORT_SYMBOL(rtl_usb_probe); + +void rtl_usb_disconnect(struct usb_interface *intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(intf); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw)); + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + if (unlikely(!rtlpriv)) + return; + /*ieee80211_unregister_hw will call ops_stop */ + if (rtlmac->mac80211_registered == 1) { + ieee80211_unregister_hw(hw); + rtlmac->mac80211_registered = 0; + } else { + rtl_deinit_deferred_work(hw); + rtlpriv->intf_ops->adapter_stop(hw); + } + /*deinit rfkill */ + /* rtl_deinit_rfkill(hw); */ + rtl_usb_deinit(hw); + rtl_deinit_core(hw); + rtlpriv->cfg->ops->deinit_sw_leds(hw); + rtlpriv->cfg->ops->deinit_sw_vars(hw); + _rtl_usb_io_handler_release(hw); + usb_put_dev(rtlusb->udev); + usb_set_intfdata(intf, NULL); + ieee80211_free_hw(hw); +} +EXPORT_SYMBOL(rtl_usb_disconnect); + +int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message) +{ + return 0; +} +EXPORT_SYMBOL(rtl_usb_suspend); + +int rtl_usb_resume(struct usb_interface *pusb_intf) +{ + return 0; +} +EXPORT_SYMBOL(rtl_usb_resume); diff --git a/drivers/net/wireless/rtlwifi/usb.h b/drivers/net/wireless/rtlwifi/usb.h new file mode 100644 index 000000000000..abadfe918d30 --- /dev/null +++ b/drivers/net/wireless/rtlwifi/usb.h @@ -0,0 +1,164 @@ +/****************************************************************************** + * + * Copyright(c) 2009-2011 Realtek Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * + * The full GNU General Public License is included in this distribution in the + * file called LICENSE. + * + * Contact Information: + * wlanfae <wlanfae@realtek.com> + * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, + * Hsinchu 300, Taiwan. 
+ * + *****************************************************************************/ + +#ifndef __RTL_USB_H__ +#define __RTL_USB_H__ + +#include <linux/usb.h> +#include <linux/skbuff.h> + +#define RTL_USB_DEVICE(vend, prod, cfg) \ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \ + .idVendor = (vend), \ + .idProduct = (prod), \ + .driver_info = (kernel_ulong_t)&(cfg) + +#define USB_HIGH_SPEED_BULK_SIZE 512 +#define USB_FULL_SPEED_BULK_SIZE 64 + + +#define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */ +#define RTL_USB_MAX_EP_NUM 6 /* max ep number */ +#define RTL_USB_MAX_TX_URBS_NUM 8 + +enum rtl_txq { + /* These definitions shall be consistent with value + * returned by skb_get_queue_mapping + *------------------------------------*/ + RTL_TXQ_BK, + RTL_TXQ_BE, + RTL_TXQ_VI, + RTL_TXQ_VO, + /*------------------------------------*/ + RTL_TXQ_BCN, + RTL_TXQ_MGT, + RTL_TXQ_HI, + + /* Must be last */ + __RTL_TXQ_NUM, +}; + +struct rtl_ep_map { + u32 ep_mapping[__RTL_TXQ_NUM]; +}; + +struct _trx_info { + struct rtl_usb *rtlusb; + u32 ep_num; +}; + +static inline void _rtl_install_trx_info(struct rtl_usb *rtlusb, + struct sk_buff *skb, + u32 ep_num) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + info->rate_driver_data[0] = rtlusb; + info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num; +} + + +/* Add suspend/resume later */ +enum rtl_usb_state { + USB_STATE_STOP = 0, + USB_STATE_START = 1, +}; + +#define IS_USB_STOP(rtlusb_ptr) (USB_STATE_STOP == (rtlusb_ptr)->state) +#define IS_USB_START(rtlusb_ptr) (USB_STATE_START == (rtlusb_ptr)->state) +#define SET_USB_STOP(rtlusb_ptr) \ + do { \ + (rtlusb_ptr)->state = USB_STATE_STOP; \ + } while (0) + +#define SET_USB_START(rtlusb_ptr) \ + do { \ + (rtlusb_ptr)->state = USB_STATE_START; \ + } while (0) + +struct rtl_usb { + struct usb_device *udev; + struct usb_interface *intf; + enum rtl_usb_state state; + + /* Bcn control register setting */ + u32 reg_bcn_ctrl_val; + /* for 88/92cu card disable */ + u8 disableHWSM; + /*QOS & EDCA */ + enum acm_method acm_method; + /* irq . HIMR,HIMR_EX */ + u32 irq_mask[2]; + bool irq_enabled; + + u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index); + + /* Tx */ + u8 out_ep_nums ; + u8 out_queue_sel; + struct rtl_ep_map ep_map; + + u32 max_bulk_out_size; + u32 tx_submitted_urbs; + struct sk_buff_head tx_skb_queue[RTL_USB_MAX_EP_NUM]; + + struct usb_anchor tx_pending[RTL_USB_MAX_EP_NUM]; + struct usb_anchor tx_submitted; + + struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *, + struct sk_buff_head *); + int (*usb_tx_post_hdl)(struct ieee80211_hw *, + struct urb *, struct sk_buff *); + void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *); + + /* Rx */ + u8 in_ep_nums ; + u32 in_ep; /* Bulk IN endpoint number */ + u32 rx_max_size; /* Bulk IN max buffer size */ + u32 rx_urb_num; /* How many Bulk INs are submitted to host. 
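RTL_USB_DEVICE stashes a pointer to the chip's rtl_hal_cfg in driver_info, which is exactly what rtl_usb_probe() casts back out when it assigns rtlpriv->cfg. A hedged sketch of how a chip module's ID table would use it (the VID/PID pair is an arbitrary example and rtl92cu_hal_cfg stands in for whatever cfg structure the chip module actually exports):

/* Sketch only: not a complete device table. */
extern struct rtl_hal_cfg rtl92cu_hal_cfg;

static struct usb_device_id demo_rtl8192cu_ids[] = {
        { RTL_USB_DEVICE(0x0bda, 0x8192, rtl92cu_hal_cfg) },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, demo_rtl8192cu_ids);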
*/ + struct usb_anchor rx_submitted; + void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *, + struct sk_buff_head *); + void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *); +}; + +struct rtl_usb_priv { + struct rtl_usb dev; + struct rtl_led_ctl ledctl; +}; + +#define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv)) +#define rtl_usbdev(usbpriv) (&((usbpriv)->dev)) + + + +int __devinit rtl_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id); +void rtl_usb_disconnect(struct usb_interface *intf); +int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message); +int rtl_usb_resume(struct usb_interface *pusb_intf); + +#endif diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index d44d79613d2d..01226f8e70f9 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -34,6 +34,8 @@ #include <linux/firmware.h> #include <linux/version.h> #include <linux/etherdevice.h> +#include <linux/vmalloc.h> +#include <linux/usb.h> #include <net/mac80211.h> #include "debug.h" @@ -82,6 +84,19 @@ #define MAC80211_3ADDR_LEN 24 #define MAC80211_4ADDR_LEN 30 +#define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */ +#define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */ +#define MAX_PG_GROUP 13 +#define CHANNEL_GROUP_MAX_2G 3 +#define CHANNEL_GROUP_IDX_5GL 3 +#define CHANNEL_GROUP_IDX_5GM 6 +#define CHANNEL_GROUP_IDX_5GH 9 +#define CHANNEL_GROUP_MAX_5G 9 +#define CHANNEL_MAX_NUMBER_2G 14 +#define AVG_THERMAL_NUM 8 + +/* for early mode */ +#define EM_HDR_LEN 8 enum intf_type { INTF_PCI = 0, INTF_USB = 1, @@ -113,11 +128,38 @@ enum hardware_type { HARDWARE_TYPE_RTL8192CU, HARDWARE_TYPE_RTL8192DE, HARDWARE_TYPE_RTL8192DU, + HARDWARE_TYPE_RTL8723E, + HARDWARE_TYPE_RTL8723U, - /*keep it last*/ + /* keep it last */ HARDWARE_TYPE_NUM }; +#define IS_HARDWARE_TYPE_8192SU(rtlhal) \ + (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU) +#define IS_HARDWARE_TYPE_8192SE(rtlhal) \ + (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) +#define IS_HARDWARE_TYPE_8192CE(rtlhal) \ + (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) +#define IS_HARDWARE_TYPE_8192CU(rtlhal) \ + (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) +#define IS_HARDWARE_TYPE_8192DE(rtlhal) \ + (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) +#define IS_HARDWARE_TYPE_8192DU(rtlhal) \ + (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU) +#define IS_HARDWARE_TYPE_8723E(rtlhal) \ + (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E) +#define IS_HARDWARE_TYPE_8723U(rtlhal) \ + (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U) +#define IS_HARDWARE_TYPE_8192S(rtlhal) \ +(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal)) +#define IS_HARDWARE_TYPE_8192C(rtlhal) \ +(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal)) +#define IS_HARDWARE_TYPE_8192D(rtlhal) \ +(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal)) +#define IS_HARDWARE_TYPE_8723(rtlhal) \ +(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal)) + enum scan_operation_backup_opt { SCAN_OPT_BACKUP = 0, SCAN_OPT_RESTORE, @@ -315,6 +357,7 @@ enum rf_type { RF_1T1R = 0, RF_1T2R = 1, RF_2T2R = 2, + RF_2T2R_GREEN = 3, }; enum ht_channel_width { @@ -359,6 +402,8 @@ enum rtl_var_map { EFUSE_LOADER_CLK_EN, EFUSE_ANA8M, EFUSE_HWSET_MAX_SIZE, + EFUSE_MAX_SECTION_MAP, + EFUSE_REAL_CONTENT_SIZE, /*CAM map */ RWCAM, @@ -397,6 +442,7 @@ enum rtl_var_map { RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */ RTL_IMR_BDOK, /*Beacon 
Queue DMA OK Interrup */ RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */ + RTL_IMR_COMDOK, /*Command Queue DMA OK Interrupt*/ RTL_IMR_TBDOK, /*Transmit Beacon OK interrup */ RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */ RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */ @@ -405,7 +451,8 @@ enum rtl_var_map { RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */ RTL_IMR_VODOK, /*AC_VO DMA Interrupt */ RTL_IMR_ROK, /*Receive DMA OK Interrupt */ - RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt|RTL_IMR_TBDOK|RTL_IMR_TBDER)*/ + RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt | RTL_IMR_TBDOK | + * RTL_IMR_TBDER) */ /*CCK Rates, TxHT = 0 */ RTL_RC_CCK_RATE1M, @@ -481,6 +528,19 @@ enum acm_method { eAcmWay2_SW = 2, }; +enum macphy_mode { + SINGLEMAC_SINGLEPHY = 0, + DUALMAC_DUALPHY, + DUALMAC_SINGLEPHY, +}; + +enum band_type { + BAND_ON_2_4G = 0, + BAND_ON_5G, + BAND_ON_BOTH, + BANDMAX +}; + /*aci/aifsn Field. Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/ union aci_aifsn { @@ -505,6 +565,17 @@ enum wireless_mode { WIRELESS_MODE_N_5G = 0x20 }; +#define IS_WIRELESS_MODE_A(wirelessmode) \ + (wirelessmode == WIRELESS_MODE_A) +#define IS_WIRELESS_MODE_B(wirelessmode) \ + (wirelessmode == WIRELESS_MODE_B) +#define IS_WIRELESS_MODE_G(wirelessmode) \ + (wirelessmode == WIRELESS_MODE_G) +#define IS_WIRELESS_MODE_N_24G(wirelessmode) \ + (wirelessmode == WIRELESS_MODE_N_24G) +#define IS_WIRELESS_MODE_N_5G(wirelessmode) \ + (wirelessmode == WIRELESS_MODE_N_5G) + enum ratr_table_mode { RATR_INX_WIRELESS_NGB = 0, RATR_INX_WIRELESS_NG = 1, @@ -574,11 +645,11 @@ struct rtl_probe_rsp { struct rtl_led { void *hw; enum rtl_led_pin ledpin; - bool b_ledon; + bool ledon; }; struct rtl_led_ctl { - bool bled_opendrain; + bool led_opendrain; struct rtl_led sw_led0; struct rtl_led sw_led1; }; @@ -603,6 +674,8 @@ struct false_alarm_statistics { u32 cnt_rate_illegal; u32 cnt_crc8_fail; u32 cnt_mcs_fail; + u32 cnt_fast_fsync_fail; + u32 cnt_sb_search_fail; u32 cnt_ofdm_fail; u32 cnt_cck_fail; u32 cnt_all; @@ -690,6 +763,32 @@ struct rtl_rfkill { bool rfkill_state; /*0 is off, 1 is on */ }; +#define IQK_MATRIX_REG_NUM 8 +#define IQK_MATRIX_SETTINGS_NUM (1 + 24 + 21) +struct iqk_matrix_regs { + bool b_iqk_done; + long value[1][IQK_MATRIX_REG_NUM]; +}; + +struct phy_parameters { + u16 length; + u32 *pdata; +}; + +enum hw_param_tab_index { + PHY_REG_2T, + PHY_REG_1T, + PHY_REG_PG, + RADIOA_2T, + RADIOB_2T, + RADIOA_1T, + RADIOB_1T, + MAC_REG, + AGCTAB_2T, + AGCTAB_1T, + MAX_TAB +}; + struct rtl_phy { struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */ struct init_gain initgain_backup; @@ -705,8 +804,9 @@ struct rtl_phy { u8 current_channel; u8 h2c_box_num; u8 set_io_inprogress; + u8 lck_inprogress; - /*record for power tracking*/ + /* record for power tracking */ s32 reg_e94; s32 reg_e9c; s32 reg_ea4; @@ -723,26 +823,32 @@ struct rtl_phy { u32 iqk_mac_backup[IQK_MAC_REG_NUM]; u32 iqk_bb_backup[10]; - bool b_rfpi_enable; + /* Dual mac */ + bool need_iqk; + struct iqk_matrix_regs iqk_matrix_regsetting[IQK_MATRIX_SETTINGS_NUM]; + + bool rfpi_enable; u8 pwrgroup_cnt; - u8 bcck_high_power; - /* 3 groups of pwr diff by rates*/ - u32 mcs_txpwrlevel_origoffset[4][16]; + u8 cck_high_power; + /* MAX_PG_GROUP groups of pwr diff by rates */ + u32 mcs_txpwrlevel_origoffset[MAX_PG_GROUP][16]; u8 default_initialgain[4]; - /*the current Tx power level*/ + /* the current Tx power level */ u8 cur_cck_txpwridx; u8 cur_ofdm24g_txpwridx; u32 rfreg_chnlval[2]; - bool b_apk_done; + bool apk_done; + u32 reg_rf3c[2]; /* pathA / pathB */ - /*fsync*/ u8 
framesync; u32 framesync_c34; u8 num_total_rfpath; + struct phy_parameters hwparam_tables[MAX_TAB]; + u16 rf_pathmap; }; #define MAX_TID_COUNT 9 @@ -768,6 +874,7 @@ struct rtl_tid_data { struct rtl_priv; struct rtl_io { struct device *dev; + struct mutex bb_mutex; /*PCI MEM map */ unsigned long pci_mem_end; /*shared mem end */ @@ -779,11 +886,14 @@ struct rtl_io { void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val); void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val); - - u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); - u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); - u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr); - + int (*writeN_async) (struct rtl_priv *rtlpriv, u32 addr, u16 len, + u8 *pdata); + + u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); + u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); + u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr); + int (*readN_sync) (struct rtl_priv *rtlpriv, u32 addr, u16 len, + u8 *pdata); }; struct rtl_mac { @@ -815,16 +925,24 @@ struct rtl_mac { bool act_scanning; u8 cnt_after_linked; - /*RDG*/ bool rdg_en; + /* early mode */ + /* skb wait queue */ + struct sk_buff_head skb_waitq[MAX_TID_COUNT]; + u8 earlymode_threshold; + + /*RDG*/ + bool rdg_en; - /*AP*/ u8 bssid[6]; - u8 mcs[16]; /*16 bytes mcs for HT rates.*/ - u32 basic_rates; /*b/g rates*/ + /*AP*/ + u8 bssid[6]; + u32 vendor; + u8 mcs[16]; /* 16 bytes mcs for HT rates. */ + u32 basic_rates; /* b/g rates */ u8 ht_enable; u8 sgi_40; u8 sgi_20; u8 bw_40; - u8 mode; /*wireless mode*/ + u8 mode; /* wireless mode */ u8 slot_time; u8 short_preamble; u8 use_cts_protect; @@ -835,9 +953,11 @@ struct rtl_mac { u8 retry_long; u16 assoc_id; - /*IBSS*/ int beacon_interval; + /*IBSS*/ + int beacon_interval; - /*AMPDU*/ u8 min_space_cfg; /*For Min spacing configurations */ + /*AMPDU*/ + u8 min_space_cfg; /*For Min spacing configurations */ u8 max_mss_density; u8 current_ampdu_factor; u8 current_ampdu_density; @@ -852,17 +972,54 @@ struct rtl_hal { enum intf_type interface; u16 hw_type; /*92c or 92d or 92s and so on */ + u8 ic_class; u8 oem_id; - u8 version; /*version of chip */ + u32 version; /*version of chip */ u8 state; /*stop 0, start 1 */ /*firmware */ + u32 fwsize; u8 *pfirmware; - bool b_h2c_setinprogress; + u16 fw_version; + u16 fw_subversion; + bool h2c_setinprogress; u8 last_hmeboxnum; - bool bfw_ready; + bool fw_ready; /*Reserve page start offset except beacon in TxQ. 
*/ u8 fw_rsvdpage_startoffset; + u8 h2c_txcmd_seq; + + /* FW Cmd IO related */ + u16 fwcmd_iomap; + u32 fwcmd_ioparam; + bool set_fwcmd_inprogress; + u8 current_fwcmd_io; + + /**/ + bool driver_going2unload; + + /*AMPDU init min space*/ + u8 minspace_cfg; /*For Min spacing configurations */ + + /* Dual mac */ + enum macphy_mode macphymode; + enum band_type current_bandtype; /* 0:2.4G, 1:5G */ + enum band_type current_bandtypebackup; + enum band_type bandset; + /* dual MAC 0--Mac0 1--Mac1 */ + u32 interfaceindex; + /* just for DualMac S3S4 */ + u8 macphyctl_reg; + bool earlymode_enable; + /* Dual mac*/ + bool during_mac0init_radiob; + bool during_mac1init_radioa; + bool reloadtxpowerindex; + /* True if IMR or IQK have done + for 2.4G in scan progress */ + bool load_imrandiqk_setting_for2g; + + bool disable_amsdu_8k; }; struct rtl_security { @@ -887,48 +1044,61 @@ struct rtl_security { }; struct rtl_dm { - /*PHY status for DM */ + /*PHY status for Dynamic Management */ long entry_min_undecoratedsmoothed_pwdb; long undecorated_smoothed_pwdb; /*out dm */ long entry_max_undecoratedsmoothed_pwdb; - bool b_dm_initialgain_enable; - bool bdynamic_txpower_enable; - bool bcurrent_turbo_edca; - bool bis_any_nonbepkts; /*out dm */ - bool bis_cur_rdlstate; - bool btxpower_trackingInit; - bool b_disable_framebursting; - bool b_cck_inch14; - bool btxpower_tracking; - bool b_useramask; - bool brfpath_rxenable[4]; - + bool dm_initialgain_enable; + bool dynamic_txpower_enable; + bool current_turbo_edca; + bool is_any_nonbepkts; /*out dm */ + bool is_cur_rdlstate; + bool txpower_trackingInit; + bool disable_framebursting; + bool cck_inch14; + bool txpower_tracking; + bool useramask; + bool rfpath_rxenable[4]; + bool inform_fw_driverctrldm; + bool current_mrc_switch; + u8 txpowercount; + + u8 thermalvalue_rxgain; u8 thermalvalue_iqk; u8 thermalvalue_lck; u8 thermalvalue; u8 last_dtp_lvl; + u8 thermalvalue_avg[AVG_THERMAL_NUM]; + u8 thermalvalue_avg_index; + bool done_txpower; u8 dynamic_txhighpower_lvl; /*Tx high power level */ - u8 dm_flag; /*Indicate if each dynamic mechanism's status. */ + u8 dm_flag; /*Indicate each dynamic mechanism's status. */ u8 dm_type; u8 txpower_track_control; - + bool interrupt_migration; + bool disable_tx_int; char ofdm_index[2]; char cck_index; + u8 power_index_backup[6]; }; -#define EFUSE_MAX_LOGICAL_SIZE 128 +#define EFUSE_MAX_LOGICAL_SIZE 256 struct rtl_efuse { - bool bautoLoad_ok; + bool autoLoad_ok; bool bootfromefuse; u16 max_physical_size; - u8 contents[EFUSE_MAX_LOGICAL_SIZE]; u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE]; u16 efuse_usedbytes; u8 efuse_usedpercentage; +#ifdef EFUSE_REPG_WORKAROUND + bool efuse_re_pg_sec1flag; + u8 efuse_re_pg_data[8]; +#endif u8 autoload_failflag; + u8 autoload_status; short epromtype; u16 eeprom_vid; @@ -938,69 +1108,90 @@ struct rtl_efuse { u8 eeprom_oemid; u16 eeprom_channelplan; u8 eeprom_version; + u8 board_type; + u8 external_pa; u8 dev_addr[6]; - bool b_txpwr_fromeprom; + bool txpwr_fromeprom; + u8 eeprom_crystalcap; u8 eeprom_tssi[2]; - u8 eeprom_pwrlimit_ht20[3]; - u8 eeprom_pwrlimit_ht40[3]; - u8 eeprom_chnlarea_txpwr_cck[2][3]; - u8 eeprom_chnlarea_txpwr_ht40_1s[2][3]; - u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][3]; - u8 txpwrlevel_cck[2][14]; - u8 txpwrlevel_ht40_1s[2][14]; /*For HT 40MHZ pwr */ - u8 txpwrlevel_ht40_2s[2][14]; /*For HT 40MHZ pwr */ + u8 eeprom_tssi_5g[3][2]; /* for 5GL/5GM/5GH band. 
*/ + u8 eeprom_pwrlimit_ht20[CHANNEL_GROUP_MAX]; + u8 eeprom_pwrlimit_ht40[CHANNEL_GROUP_MAX]; + u8 eeprom_chnlarea_txpwr_cck[2][CHANNEL_GROUP_MAX_2G]; + u8 eeprom_chnlarea_txpwr_ht40_1s[2][CHANNEL_GROUP_MAX]; + u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][CHANNEL_GROUP_MAX]; + u8 txpwrlevel_cck[2][CHANNEL_MAX_NUMBER_2G]; + u8 txpwrlevel_ht40_1s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */ + u8 txpwrlevel_ht40_2s[2][CHANNEL_MAX_NUMBER]; /*For HT 40MHZ pwr */ + + u8 internal_pa_5g[2]; /* pathA / pathB */ + u8 eeprom_c9; + u8 eeprom_cc; /*For power group */ - u8 pwrgroup_ht20[2][14]; - u8 pwrgroup_ht40[2][14]; - - char txpwr_ht20diff[2][14]; /*HT 20<->40 Pwr diff */ - u8 txpwr_legacyhtdiff[2][14]; /*For HT<->legacy pwr diff */ + u8 eeprom_pwrgroup[2][3]; + u8 pwrgroup_ht20[2][CHANNEL_MAX_NUMBER]; + u8 pwrgroup_ht40[2][CHANNEL_MAX_NUMBER]; + + char txpwr_ht20diff[2][CHANNEL_MAX_NUMBER]; /*HT 20<->40 Pwr diff */ + /*For HT<->legacy pwr diff*/ + u8 txpwr_legacyhtdiff[2][CHANNEL_MAX_NUMBER]; + u8 txpwr_safetyflag; /* Band edge enable flag */ + u16 eeprom_txpowerdiff; + u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */ + u8 antenna_txpwdiff[3]; u8 eeprom_regulatory; u8 eeprom_thermalmeter; - /*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */ - u8 thermalmeter[2]; + u8 thermalmeter[2]; /*ThermalMeter, index 0 for RFIC0, 1 for RFIC1 */ + u16 tssi_13dbm; + u8 crystalcap; /* CrystalCap. */ + u8 delta_iqk; + u8 delta_lck; u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */ - bool b_apk_thermalmeterignore; + bool apk_thermalmeterignore; + + bool b1x1_recvcombine; + bool b1ss_support; + + /*channel plan */ + u8 channel_plan; }; struct rtl_ps_ctl { + bool pwrdomain_protect; bool set_rfpowerstate_inprogress; - bool b_in_powersavemode; + bool in_powersavemode; bool rfchange_inprogress; - bool b_swrf_processing; - bool b_hwradiooff; - - u32 last_sleep_jiffies; - u32 last_awake_jiffies; - u32 last_delaylps_stamp_jiffies; + bool swrf_processing; + bool hwradiooff; /* * just for PCIE ASPM * If it supports ASPM, Offset[560h] = 0x40, * otherwise Offset[560h] = 0x00. * */ - bool b_support_aspm; - bool b_support_backdoor; + bool support_aspm; + bool support_backdoor; /*for LPS */ enum rt_psmode dot11_psmode; /*Power save mode configured. */ - bool b_leisure_ps; - bool b_fwctrl_lps; + bool swctrl_lps; + bool leisure_ps; + bool fwctrl_lps; u8 fwctrl_psmode; /*For Fw control LPS mode */ - u8 b_reg_fwctrl_lps; + u8 reg_fwctrl_lps; /*Record Fw PS mode status. */ - bool b_fw_current_inpsmode; + bool fw_current_inpsmode; u8 reg_max_lps_awakeintvl; bool report_linked; /*for IPS */ - bool b_inactiveps; + bool inactiveps; u32 rfoff_reason; @@ -1011,8 +1202,26 @@ struct rtl_ps_ctl { /*just for PCIE ASPM */ u8 const_amdpci_aspm; + bool pwrdown_mode; + enum rf_pwrstate inactive_pwrstate; enum rf_pwrstate rfpwr_state; /*cur power state */ + + /* for SW LPS*/ + bool sw_ps_enabled; + bool state; + bool state_inap; + bool multi_buffered; + u16 nullfunc_seq; + unsigned int dtim_counter; + unsigned int sleep_ms; + unsigned long last_sleep_jiffies; + unsigned long last_awake_jiffies; + unsigned long last_delaylps_stamp_jiffies; + unsigned long last_dtim; + unsigned long last_beacon; + unsigned long last_action; + unsigned long last_slept; }; struct rtl_stats { @@ -1038,10 +1247,10 @@ struct rtl_stats { s32 recvsignalpower; s8 rxpower; /*in dBm Translate from PWdB */ u8 signalstrength; /*in 0-100 index. 
*/ - u16 b_hwerror:1; - u16 b_crc:1; - u16 b_icv:1; - u16 b_shortpreamble:1; + u16 hwerror:1; + u16 crc:1; + u16 icv:1; + u16 shortpreamble:1; u16 antenna:1; u16 decrypted:1; u16 wakeup:1; @@ -1050,15 +1259,16 @@ struct rtl_stats { u8 rx_drvinfo_size; u8 rx_bufshift; - bool b_isampdu; + bool isampdu; + bool isfirst_ampdu; bool rx_is40Mhzpacket; u32 rx_pwdb_all; u8 rx_mimo_signalstrength[4]; /*in 0~100 index */ s8 rx_mimo_signalquality[2]; - bool b_packet_matchbssid; - bool b_is_cck; - bool b_packet_toself; - bool b_packet_beacon; /*for rssi */ + bool packet_matchbssid; + bool is_cck; + bool packet_toself; + bool packet_beacon; /*for rssi */ char cck_adc_pwdb[4]; /*for rx path selection */ }; @@ -1069,23 +1279,23 @@ struct rt_link_detect { u32 num_tx_inperiod; u32 num_rx_inperiod; - bool b_busytraffic; - bool b_higher_busytraffic; - bool b_higher_busyrxtraffic; + bool busytraffic; + bool higher_busytraffic; + bool higher_busyrxtraffic; }; struct rtl_tcb_desc { - u8 b_packet_bw:1; - u8 b_multicast:1; - u8 b_broadcast:1; - - u8 b_rts_stbc:1; - u8 b_rts_enable:1; - u8 b_cts_enable:1; - u8 b_rts_use_shortpreamble:1; - u8 b_rts_use_shortgi:1; + u8 packet_bw:1; + u8 multicast:1; + u8 broadcast:1; + + u8 rts_stbc:1; + u8 rts_enable:1; + u8 cts_enable:1; + u8 rts_use_shortpreamble:1; + u8 rts_use_shortgi:1; u8 rts_sc:1; - u8 b_rts_bw:1; + u8 rts_bw:1; u8 rts_rate; u8 use_shortgi:1; @@ -1096,20 +1306,34 @@ struct rtl_tcb_desc { u8 ratr_index; u8 mac_id; u8 hw_rate; + + u8 last_inipkt:1; + u8 cmd_or_init:1; + u8 queue_index; + + /* early mode */ + u8 empkt_num; + /* The max value by HW */ + u32 empkt_len[5]; }; struct rtl_hal_ops { int (*init_sw_vars) (struct ieee80211_hw *hw); void (*deinit_sw_vars) (struct ieee80211_hw *hw); + void (*read_chip_version)(struct ieee80211_hw *hw); void (*read_eeprom_info) (struct ieee80211_hw *hw); void (*interrupt_recognized) (struct ieee80211_hw *hw, u32 *p_inta, u32 *p_intb); int (*hw_init) (struct ieee80211_hw *hw); void (*hw_disable) (struct ieee80211_hw *hw); + void (*hw_suspend) (struct ieee80211_hw *hw); + void (*hw_resume) (struct ieee80211_hw *hw); void (*enable_interrupt) (struct ieee80211_hw *hw); void (*disable_interrupt) (struct ieee80211_hw *hw); int (*set_network_type) (struct ieee80211_hw *hw, enum nl80211_iftype type); + void (*set_chk_bssid)(struct ieee80211_hw *hw, + bool check_bssid); void (*set_bw_mode) (struct ieee80211_hw *hw, enum nl80211_channel_type ch_type); u8(*switch_channel) (struct ieee80211_hw *hw); @@ -1126,23 +1350,26 @@ struct rtl_hal_ops { struct ieee80211_hdr *hdr, u8 *pdesc_tx, struct ieee80211_tx_info *info, struct sk_buff *skb, unsigned int queue_index); + void (*fill_fake_txdesc) (struct ieee80211_hw *hw, u8 * pDesc, + u32 buffer_len, bool bIsPsPoll); void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc, - bool b_firstseg, bool b_lastseg, + bool firstseg, bool lastseg, struct sk_buff *skb); - bool(*query_rx_desc) (struct ieee80211_hw *hw, + bool (*cmd_send_packet)(struct ieee80211_hw *hw, struct sk_buff *skb); + bool (*query_rx_desc) (struct ieee80211_hw *hw, struct rtl_stats *stats, struct ieee80211_rx_status *rx_status, u8 *pdesc, struct sk_buff *skb); void (*set_channel_access) (struct ieee80211_hw *hw); - bool(*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid); + bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid); void (*dm_watchdog) (struct ieee80211_hw *hw); void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation); - bool(*set_rf_power_state) (struct ieee80211_hw *hw, + bool 
(*set_rf_power_state) (struct ieee80211_hw *hw, enum rf_pwrstate rfpwr_state); void (*led_control) (struct ieee80211_hw *hw, enum led_ctl_mode ledaction); void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val); - u32(*get_desc) (u8 *pdesc, bool istx, u8 desc_name); + u32 (*get_desc) (u8 *pdesc, bool istx, u8 desc_name); void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue); void (*enable_hw_sec) (struct ieee80211_hw *hw); void (*set_key) (struct ieee80211_hw *hw, u32 key_index, @@ -1150,22 +1377,36 @@ struct rtl_hal_ops { bool is_wepkey, bool clear_all); void (*init_sw_leds) (struct ieee80211_hw *hw); void (*deinit_sw_leds) (struct ieee80211_hw *hw); - u32(*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); + u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, u32 data); - u32(*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, + u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask); void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, u32 regaddr, u32 bitmask, u32 data); + bool (*phy_rf6052_config) (struct ieee80211_hw *hw); + void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw, + u8 *powerlevel); + void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw, + u8 *ppowerlevel, u8 channel); + bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw, + u8 configtype); + bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw, + u8 configtype); + void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t); + void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw); + void (*dm_dynamic_txpower) (struct ieee80211_hw *hw); }; struct rtl_intf_ops { /*com */ + void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); int (*adapter_start) (struct ieee80211_hw *hw); void (*adapter_stop) (struct ieee80211_hw *hw); int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb); int (*reset_trx_ring) (struct ieee80211_hw *hw); + bool (*waitq_insert) (struct ieee80211_hw *hw, struct sk_buff *skb); /*pci */ void (*disable_aspm) (struct ieee80211_hw *hw); @@ -1179,11 +1420,36 @@ struct rtl_mod_params { int sw_crypto; }; +struct rtl_hal_usbint_cfg { + /* data - rx */ + u32 in_ep_num; + u32 rx_urb_num; + u32 rx_max_size; + + /* op - rx */ + void (*usb_rx_hdl)(struct ieee80211_hw *, struct sk_buff *); + void (*usb_rx_segregate_hdl)(struct ieee80211_hw *, struct sk_buff *, + struct sk_buff_head *); + + /* tx */ + void (*usb_tx_cleanup)(struct ieee80211_hw *, struct sk_buff *); + int (*usb_tx_post_hdl)(struct ieee80211_hw *, struct urb *, + struct sk_buff *); + struct sk_buff *(*usb_tx_aggregate_hdl)(struct ieee80211_hw *, + struct sk_buff_head *); + + /* endpoint mapping */ + int (*usb_endpoint_mapping)(struct ieee80211_hw *hw); + u16 (*usb_mq_to_hwq)(__le16 fc, u16 mac80211_queue_index); +}; + struct rtl_hal_cfg { + u8 bar_id; char *name; char *fw_name; struct rtl_hal_ops *ops; struct rtl_mod_params *mod_params; + struct rtl_hal_usbint_cfg *usb_interface_cfg; /*this map used for some registers or vars defined int HAL but used in MAIN */ @@ -1202,6 +1468,11 @@ struct rtl_locks { spinlock_t rf_ps_lock; spinlock_t rf_lock; spinlock_t lps_lock; + spinlock_t waitq_lock; + spinlock_t tx_urb_lock; + + /*Dual mac*/ + spinlock_t cck_and_rw_pagea_lock; }; struct rtl_works { @@ -1218,12 +1489,20 @@ struct rtl_works { struct workqueue_struct *rtl_wq; struct delayed_work watchdog_wq; struct 
delayed_work ips_nic_off_wq; + + /* For SW LPS */ + struct delayed_work ps_work; + struct delayed_work ps_rfon_wq; }; struct rtl_debug { u32 dbgp_type[DBGP_TYPE_MAX]; u32 global_debuglevel; u64 global_debugcomponents; + + /* add for proc debug */ + struct proc_dir_entry *proc_dir; + char proc_name[20]; }; struct rtl_priv { @@ -1274,6 +1553,91 @@ struct rtl_priv { #define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse)) #define rtl_psc(rtlpriv) (&((rtlpriv)->psc)) + +/*************************************** + Bluetooth Co-existance Related +****************************************/ + +enum bt_ant_num { + ANT_X2 = 0, + ANT_X1 = 1, +}; + +enum bt_co_type { + BT_2WIRE = 0, + BT_ISSC_3WIRE = 1, + BT_ACCEL = 2, + BT_CSR_BC4 = 3, + BT_CSR_BC8 = 4, + BT_RTL8756 = 5, +}; + +enum bt_cur_state { + BT_OFF = 0, + BT_ON = 1, +}; + +enum bt_service_type { + BT_SCO = 0, + BT_A2DP = 1, + BT_HID = 2, + BT_HID_IDLE = 3, + BT_SCAN = 4, + BT_IDLE = 5, + BT_OTHER_ACTION = 6, + BT_BUSY = 7, + BT_OTHERBUSY = 8, + BT_PAN = 9, +}; + +enum bt_radio_shared { + BT_RADIO_SHARED = 0, + BT_RADIO_INDIVIDUAL = 1, +}; + +struct bt_coexist_info { + + /* EEPROM BT info. */ + u8 eeprom_bt_coexist; + u8 eeprom_bt_type; + u8 eeprom_bt_ant_num; + u8 eeprom_bt_ant_isolation; + u8 eeprom_bt_radio_shared; + + u8 bt_coexistence; + u8 bt_ant_num; + u8 bt_coexist_type; + u8 bt_state; + u8 bt_cur_state; /* 0:on, 1:off */ + u8 bt_ant_isolation; /* 0:good, 1:bad */ + u8 bt_pape_ctrl; /* 0:SW, 1:SW/HW dynamic */ + u8 bt_service; + u8 bt_radio_shared_type; + u8 bt_rfreg_origin_1e; + u8 bt_rfreg_origin_1f; + u8 bt_rssi_state; + u32 ratio_tx; + u32 ratio_pri; + u32 bt_edca_ul; + u32 bt_edca_dl; + + bool b_init_set; + bool b_bt_busy_traffic; + bool b_bt_traffic_mode_set; + bool b_bt_non_traffic_mode_set; + + bool b_fw_coexist_all_off; + bool b_sw_coexist_all_off; + u32 current_state; + u32 previous_state; + u8 bt_pre_rssi_state; + + u8 b_reg_bt_iso; + u8 b_reg_bt_sco; + +}; + + /**************************************** mem access macro define start Call endian free function when @@ -1281,7 +1645,7 @@ struct rtl_priv { 2. Before write integer to IO. 3. After read integer from IO. 
****************************************/ -/* Convert little data endian to host */ +/* Convert little data endian to host ordering */ #define EF1BYTE(_val) \ ((u8)(_val)) #define EF2BYTE(_val) \ @@ -1289,27 +1653,21 @@ struct rtl_priv { #define EF4BYTE(_val) \ (le32_to_cpu(_val)) -/* Read data from memory */ -#define READEF1BYTE(_ptr) \ - EF1BYTE(*((u8 *)(_ptr))) +/* Read le16 data from memory and convert to host ordering */ #define READEF2BYTE(_ptr) \ EF2BYTE(*((u16 *)(_ptr))) -#define READEF4BYTE(_ptr) \ - EF4BYTE(*((u32 *)(_ptr))) -/* Write data to memory */ -#define WRITEEF1BYTE(_ptr, _val) \ - (*((u8 *)(_ptr))) = EF1BYTE(_val) +/* Write le16 data to memory in host ordering */ #define WRITEEF2BYTE(_ptr, _val) \ (*((u16 *)(_ptr))) = EF2BYTE(_val) -#define WRITEEF4BYTE(_ptr, _val) \ - (*((u32 *)(_ptr))) = EF4BYTE(_val) - -/*Example: -BIT_LEN_MASK_32(0) => 0x00000000 -BIT_LEN_MASK_32(1) => 0x00000001 -BIT_LEN_MASK_32(2) => 0x00000003 -BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/ + +/* Create a bit mask + * Examples: + * BIT_LEN_MASK_32(0) => 0x00000000 + * BIT_LEN_MASK_32(1) => 0x00000001 + * BIT_LEN_MASK_32(2) => 0x00000003 + * BIT_LEN_MASK_32(32) => 0xFFFFFFFF + */ #define BIT_LEN_MASK_32(__bitlen) \ (0xFFFFFFFF >> (32 - (__bitlen))) #define BIT_LEN_MASK_16(__bitlen) \ @@ -1317,9 +1675,11 @@ BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/ #define BIT_LEN_MASK_8(__bitlen) \ (0xFF >> (8 - (__bitlen))) -/*Example: -BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003 -BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/ +/* Create an offset bit mask + * Examples: + * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003 + * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000 + */ #define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \ (BIT_LEN_MASK_32(__bitlen) << (__bitoffset)) #define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \ @@ -1328,8 +1688,9 @@ BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/ (BIT_LEN_MASK_8(__bitlen) << (__bitoffset)) /*Description: -Return 4-byte value in host byte ordering from -4-byte pointer in little-endian system.*/ + * Return 4-byte value in host byte ordering from + * 4-byte pointer in little-endian system. + */ #define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \ (EF4BYTE(*((u32 *)(__pstart)))) #define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \ @@ -1337,28 +1698,10 @@ Return 4-byte value in host byte ordering from #define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \ (EF1BYTE(*((u8 *)(__pstart)))) -/*Description: -Translate subfield (continuous bits in little-endian) of 4-byte -value to host byte ordering.*/ -#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ - ( \ - (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \ - BIT_LEN_MASK_32(__bitlen) \ - ) -#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \ - ( \ - (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \ - BIT_LEN_MASK_16(__bitlen) \ - ) -#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \ - ( \ - (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \ - BIT_LEN_MASK_8(__bitlen) \ - ) - -/*Description: -Mask subfield (continuous bits in little-endian) of 4-byte value -and return the result in 4-byte value in host byte ordering.*/ +/* Description: + * Mask subfield (continuous bits in little-endian) of 4-byte value + * and return the result in 4-byte value in host byte ordering. 
+ */ #define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ ( \ LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \ @@ -1375,20 +1718,9 @@ and return the result in 4-byte value in host byte ordering.*/ (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \ ) -/*Description: -Set subfield of little-endian 4-byte value to specified value. */ -#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \ - *((u32 *)(__pstart)) = EF4BYTE \ - ( \ - LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \ - ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \ - ); -#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \ - *((u16 *)(__pstart)) = EF2BYTE \ - ( \ - LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \ - ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \ - ); +/* Description: + * Set subfield of little-endian 4-byte value to specified value. + */ #define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \ *((u8 *)(__pstart)) = EF1BYTE \ ( \ @@ -1400,13 +1732,14 @@ Set subfield of little-endian 4-byte value to specified value. */ mem access macro define end ****************************************/ -#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC) +#define byte(x, n) ((x >> (8 * n)) & 0xff) + #define RTL_WATCH_DOG_TIME 2000 #define MSECS(t) msecs_to_jiffies(t) -#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS) -#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE) -#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE) -#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA) +#define WLAN_FC_GET_VERS(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_VERS) +#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) +#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) +#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA) #define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) #define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) #define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) @@ -1420,6 +1753,8 @@ Set subfield of little-endian 4-byte value to specified value. */ #define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */ /*Always enable ASPM and Clock Req in initialization.*/ #define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) +/* no matter RFOFF or SLEEP we set PS_ASPM_LEVL*/ +#define RT_PS_LEVEL_ASPM BIT(7) /*When LPS is on, disable 2R if no packet is received or transmittd.*/ #define RT_RF_LPS_DISALBE_2R BIT(30) #define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */ @@ -1433,15 +1768,6 @@ Set subfield of little-endian 4-byte value to specified value. 
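A minimal usage sketch for the little-endian bit-field helpers in this header (BIT_OFFSET_LEN_MASK_8, LE_P1BYTE_TO_HOST_1BYTE and the SET_BITS_TO_LE_1BYTE macro that follows). The descriptor layout and function names are hypothetical, purely for illustration; this is not part of the patch.

static inline void example_set_queue_sel(u8 *desc, u8 queue)
{
	/* clear bits [3:0] of the first descriptor byte, then set the value */
	SET_BITS_TO_LE_1BYTE(desc, 0, 4, queue);
}

static inline u8 example_get_rate_field(u8 *desc)
{
	/* read bits [7:4] of the first descriptor byte */
	return (LE_P1BYTE_TO_HOST_1BYTE(desc) & BIT_OFFSET_LEN_MASK_8(4, 4)) >> 4;
}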
*/ #define container_of_dwork_rtl(x, y, z) \ container_of(container_of(x, struct delayed_work, work), y, z) -#define FILL_OCTET_STRING(_os, _octet, _len) \ - (_os).octet = (u8 *)(_octet); \ - (_os).length = (_len); - -#define CP_MACADDR(des, src) \ - ((des)[0] = (src)[0], (des)[1] = (src)[1],\ - (des)[2] = (src)[2], (des)[3] = (src)[3],\ - (des)[4] = (src)[4], (des)[5] = (src)[5]) - static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr) { return rtlpriv->io.read8_sync(rtlpriv, addr); diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/wl1251/acx.c index 64a0214cfb29..ef8370edace7 100644 --- a/drivers/net/wireless/wl1251/acx.c +++ b/drivers/net/wireless/wl1251/acx.c @@ -776,6 +776,31 @@ out: return ret; } +int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight, + u8 depth, enum wl1251_acx_low_rssi_type type) +{ + struct acx_low_rssi *rssi; + int ret; + + wl1251_debug(DEBUG_ACX, "acx low rssi"); + + rssi = kzalloc(sizeof(*rssi), GFP_KERNEL); + if (!rssi) + return -ENOMEM; + + rssi->threshold = threshold; + rssi->weight = weight; + rssi->depth = depth; + rssi->type = type; + + ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi)); + if (ret < 0) + wl1251_warning("failed to set low rssi threshold: %d", ret); + + kfree(rssi); + return ret; +} + int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble) { struct acx_preamble *acx; @@ -978,6 +1003,34 @@ out: return ret; } +int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode, + u8 max_consecutive) +{ + struct wl1251_acx_bet_enable *acx; + int ret; + + wl1251_debug(DEBUG_ACX, "acx bet enable"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->enable = mode; + acx->max_consecutive = max_consecutive; + + ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx)); + if (ret < 0) { + wl1251_warning("wl1251 acx bet enable failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max, u8 aifs, u16 txop) { diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h index efcc3aaca14f..c2ba100f9b1a 100644 --- a/drivers/net/wireless/wl1251/acx.h +++ b/drivers/net/wireless/wl1251/acx.h @@ -399,6 +399,49 @@ struct acx_rts_threshold { u8 pad[2]; } __packed; +enum wl1251_acx_low_rssi_type { + /* + * The event is a "Level" indication which keeps triggering + * as long as the average RSSI is below the threshold. + */ + WL1251_ACX_LOW_RSSI_TYPE_LEVEL = 0, + + /* + * The event is an "Edge" indication which triggers + * only when the RSSI threshold is crossed from above. + */ + WL1251_ACX_LOW_RSSI_TYPE_EDGE = 1, +}; + +struct acx_low_rssi { + struct acx_header header; + + /* + * The threshold (in dBm) below (or above after low rssi + * indication) which the firmware generates an interrupt to the + * host. This parameter is signed. + */ + s8 threshold; + + /* + * The weight of the current RSSI sample, before adding the new + * sample, that is used to calculate the average RSSI. + */ + u8 weight; + + /* + * The number of Beacons/Probe response frames that will be + * received before issuing the Low or Regained RSSI event. + */ + u8 depth; + + /* + * Configures how the Low RSSI Event is triggered. Refer to + * enum wl1251_acx_low_rssi_type for more. 
+ */ + u8 type; +} __packed; + struct acx_beacon_filter_option { struct acx_header header; @@ -1164,6 +1207,31 @@ struct wl1251_acx_wr_tbtt_and_dtim { u8 padding; } __packed; +enum wl1251_acx_bet_mode { + WL1251_ACX_BET_DISABLE = 0, + WL1251_ACX_BET_ENABLE = 1, +}; + +struct wl1251_acx_bet_enable { + struct acx_header header; + + /* + * Specifies if beacon early termination procedure is enabled or + * disabled, see enum wl1251_acx_bet_mode. + */ + u8 enable; + + /* + * Specifies the maximum number of consecutive beacons that may be + * early terminated. After this number is reached at least one full + * beacon must be correctly received in FW before beacon ET + * resumes. Range 0 - 255. + */ + u8 max_consecutive; + + u8 padding[2]; +} __packed; + struct wl1251_acx_ac_cfg { struct acx_header header; @@ -1393,6 +1461,8 @@ int wl1251_acx_cca_threshold(struct wl1251 *wl); int wl1251_acx_bcn_dtim_options(struct wl1251 *wl); int wl1251_acx_aid(struct wl1251 *wl, u16 aid); int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask); +int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight, + u8 depth, enum wl1251_acx_low_rssi_type type); int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble); int wl1251_acx_cts_protect(struct wl1251 *wl, enum acx_ctsprotect_type ctsprotect); @@ -1401,6 +1471,8 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime); int wl1251_acx_rate_policies(struct wl1251 *wl); int wl1251_acx_mem_cfg(struct wl1251 *wl); int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim); +int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode, + u8 max_consecutive); int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max, u8 aifs, u16 txop); int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue, diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/wl1251/event.c index 712372e50a87..dfc4579acb06 100644 --- a/drivers/net/wireless/wl1251/event.c +++ b/drivers/net/wireless/wl1251/event.c @@ -90,6 +90,24 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox) } } + if (wl->vif && wl->rssi_thold) { + if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) { + wl1251_debug(DEBUG_EVENT, + "ROAMING_TRIGGER_LOW_RSSI_EVENT"); + ieee80211_cqm_rssi_notify(wl->vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + GFP_KERNEL); + } + + if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) { + wl1251_debug(DEBUG_EVENT, + "ROAMING_TRIGGER_REGAINED_RSSI_EVENT"); + ieee80211_cqm_rssi_notify(wl->vif, + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + GFP_KERNEL); + } + } + return 0; } diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c index 40372bac9482..12c9e635a6d6 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/wl1251/main.c @@ -375,7 +375,7 @@ out: mutex_unlock(&wl->mutex); } -static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct wl1251 *wl = hw->priv; unsigned long flags; @@ -401,8 +401,6 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) wl->tx_queue_stopped = true; spin_unlock_irqrestore(&wl->wl_lock, flags); } - - return NETDEV_TX_OK; } static int wl1251_op_start(struct ieee80211_hw *hw) @@ -502,6 +500,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw) wl->psm = 0; wl->tx_queue_stopped = false; wl->power_level = WL1251_DEFAULT_POWER_LEVEL; + wl->rssi_thold = 0; wl->channel = WL1251_DEFAULT_CHANNEL; 
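A brief, non-normative summary of how the new wl1251 CQM pieces in this series fit together; the bss_info_changed hook-up appears in the hunk just below.

/*
 * mac80211 signals BSS_CHANGED_CQM with bss_conf->cqm_rssi_thold
 *   -> wl1251_acx_low_rssi(wl, thold, WL1251_DEFAULT_LOW_RSSI_WEIGHT,
 *                          WL1251_DEFAULT_LOW_RSSI_DEPTH,
 *                          WL1251_ACX_LOW_RSSI_TYPE_EDGE)          (main.c)
 *
 * firmware raises ROAMING_TRIGGER_LOW_RSSI_EVENT_ID or
 * ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID
 *   -> ieee80211_cqm_rssi_notify(wl->vif, LOW / HIGH, GFP_KERNEL)  (event.c)
 */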
wl1251_debugfs_reset(wl); @@ -959,6 +958,16 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, if (ret < 0) goto out; + if (changed & BSS_CHANGED_CQM) { + ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold, + WL1251_DEFAULT_LOW_RSSI_WEIGHT, + WL1251_DEFAULT_LOW_RSSI_DEPTH, + WL1251_ACX_LOW_RSSI_TYPE_EDGE); + if (ret < 0) + goto out; + wl->rssi_thold = bss_conf->cqm_rssi_thold; + } + if (changed & BSS_CHANGED_BSSID) { memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); @@ -1313,9 +1322,11 @@ int wl1251_init_ieee80211(struct wl1251 *wl) wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_BEACON_FILTER | - IEEE80211_HW_SUPPORTS_UAPSD; + IEEE80211_HW_SUPPORTS_UAPSD | + IEEE80211_HW_SUPPORTS_CQM_RSSI; - wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC); wl->hw->wiphy->max_scan_ssids = 1; wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz; @@ -1377,6 +1388,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void) wl->psm_requested = false; wl->tx_queue_stopped = false; wl->power_level = WL1251_DEFAULT_POWER_LEVEL; + wl->rssi_thold = 0; wl->beacon_int = WL1251_DEFAULT_BEACON_INT; wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD; wl->vif = NULL; diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/wl1251/ps.c index 5ed47c8373d2..9cc514703d2a 100644 --- a/drivers/net/wireless/wl1251/ps.c +++ b/drivers/net/wireless/wl1251/ps.c @@ -58,7 +58,6 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl) unsigned long delay; if (wl->psm) { - cancel_delayed_work(&wl->elp_work); delay = msecs_to_jiffies(ELP_ENTRY_DELAY); ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay); } @@ -69,6 +68,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl) unsigned long timeout, start; u32 elp_reg; + if (delayed_work_pending(&wl->elp_work)) + cancel_delayed_work(&wl->elp_work); + if (!wl->elp) return 0; @@ -102,38 +104,6 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl) return 0; } -static int wl1251_ps_set_elp(struct wl1251 *wl, bool enable) -{ - int ret; - - if (enable) { - wl1251_debug(DEBUG_PSM, "sleep auth psm/elp"); - - ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP); - if (ret < 0) - return ret; - - wl1251_ps_elp_sleep(wl); - } else { - wl1251_debug(DEBUG_PSM, "sleep auth cam"); - - /* - * When the target is in ELP, we can only - * access the ELP control register. Thus, - * we have to wake the target up before - * changing the power authorization. 
- */ - - wl1251_ps_elp_wakeup(wl); - - ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); - if (ret < 0) - return ret; - } - - return 0; -} - int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode) { int ret; @@ -153,11 +123,16 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode) if (ret < 0) return ret; + ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE, + WL1251_DEFAULT_BET_CONSECUTIVE); + if (ret < 0) + return ret; + ret = wl1251_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); if (ret < 0) return ret; - ret = wl1251_ps_set_elp(wl, true); + ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP); if (ret < 0) return ret; @@ -166,7 +141,14 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode) case STATION_ACTIVE_MODE: default: wl1251_debug(DEBUG_PSM, "leaving psm"); - ret = wl1251_ps_set_elp(wl, false); + + ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM); + if (ret < 0) + return ret; + + /* disable BET */ + ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE, + WL1251_DEFAULT_BET_CONSECUTIVE); if (ret < 0) return ret; diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/wl1251/rx.c index efa53607d5c9..c1b3b3f03da2 100644 --- a/drivers/net/wireless/wl1251/rx.c +++ b/drivers/net/wireless/wl1251/rx.c @@ -78,9 +78,10 @@ static void wl1251_rx_status(struct wl1251 *wl, */ wl->noise = desc->rssi - desc->snr / 2; - status->freq = ieee80211_channel_to_frequency(desc->channel); + status->freq = ieee80211_channel_to_frequency(desc->channel, + status->band); - status->flag |= RX_FLAG_TSFT; + status->flag |= RX_FLAG_MACTIME_MPDU; if (desc->flags & RX_DESC_ENCRYPTION_MASK) { status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; @@ -95,8 +96,52 @@ static void wl1251_rx_status(struct wl1251 *wl, if (unlikely(!(desc->flags & RX_DESC_VALID_FCS))) status->flag |= RX_FLAG_FAILED_FCS_CRC; + switch (desc->rate) { + /* skip 1 and 12 Mbps because they have same value 0x0a */ + case RATE_2MBPS: + status->rate_idx = 1; + break; + case RATE_5_5MBPS: + status->rate_idx = 2; + break; + case RATE_11MBPS: + status->rate_idx = 3; + break; + case RATE_6MBPS: + status->rate_idx = 4; + break; + case RATE_9MBPS: + status->rate_idx = 5; + break; + case RATE_18MBPS: + status->rate_idx = 7; + break; + case RATE_24MBPS: + status->rate_idx = 8; + break; + case RATE_36MBPS: + status->rate_idx = 9; + break; + case RATE_48MBPS: + status->rate_idx = 10; + break; + case RATE_54MBPS: + status->rate_idx = 11; + break; + } + + /* for 1 and 12 Mbps we have to check the modulation */ + if (desc->rate == RATE_1MBPS) { + if (!(desc->mod_pre & OFDM_RATE_BIT)) + /* CCK -> RATE_1MBPS */ + status->rate_idx = 0; + else + /* OFDM -> RATE_12MBPS */ + status->rate_idx = 6; + } - /* FIXME: set status->rate_idx */ + if (desc->mod_pre & SHORT_PREAMBLE_BIT) + status->flag |= RX_FLAG_SHORTPRE; } static void wl1251_rx_body(struct wl1251 *wl, diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/wl1251/tx.c index 554b4f9a3d3e..28121c590a2b 100644 --- a/drivers/net/wireless/wl1251/tx.c +++ b/drivers/net/wireless/wl1251/tx.c @@ -213,16 +213,30 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb, wl1251_debug(DEBUG_TX, "skb offset %d", offset); /* check whether the current skb can be used */ - if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) { - unsigned char *src = skb->data; + if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) { + struct sk_buff *newskb = skb_copy_expand(skb, 0, 3, + GFP_KERNEL); + + if (unlikely(newskb == NULL)) { + 
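For readers of the wl1251_rx_status() rate switch in the rx.c hunk above: the rate_idx ordering it implies for the driver's 2.4 GHz bitrate table (reconstructed from the cases, not stated explicitly in the patch) is summarized below.

/*
 * rate_idx:  0   1   2     3    4   5   6    7    8    9    10   11
 * bitrate:   1M  2M  5.5M  11M  6M  9M  12M  18M  24M  36M  48M  54M
 *            |------ CCK ------|  |------------ OFDM -------------|
 *
 * RATE_1MBPS and RATE_12MBPS share the same hardware code (0x0a), so the
 * OFDM_RATE_BIT in desc->mod_pre is what tells them apart.
 */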
wl1251_error("Can't allocate skb!"); + return -EINVAL; + } - /* align the buffer on a 4-byte boundary */ + tx_hdr = (struct tx_double_buffer_desc *) newskb->data; + + dev_kfree_skb_any(skb); + wl->tx_frames[tx_hdr->id] = skb = newskb; + + offset = (4 - (long)skb->data) & 0x03; + wl1251_debug(DEBUG_TX, "new skb offset %d", offset); + } + + /* align the buffer on a 4-byte boundary */ + if (offset) { + unsigned char *src = skb->data; skb_reserve(skb, offset); memmove(skb->data, src, skb->len); tx_hdr = (struct tx_double_buffer_desc *) skb->data; - } else { - wl1251_info("No handler, fixme!"); - return -EINVAL; } } @@ -368,7 +382,7 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl, { struct ieee80211_tx_info *info; struct sk_buff *skb; - int hdrlen, ret; + int hdrlen; u8 *frame; skb = wl->tx_frames[result->id]; @@ -407,40 +421,12 @@ static void wl1251_tx_packet_cb(struct wl1251 *wl, ieee80211_tx_status(wl->hw, skb); wl->tx_frames[result->id] = NULL; - - if (wl->tx_queue_stopped) { - wl1251_debug(DEBUG_TX, "cb: queue was stopped"); - - skb = skb_dequeue(&wl->tx_queue); - - /* The skb can be NULL because tx_work might have been - scheduled before the queue was stopped making the - queue empty */ - - if (skb) { - ret = wl1251_tx_frame(wl, skb); - if (ret == -EBUSY) { - /* firmware buffer is still full */ - wl1251_debug(DEBUG_TX, "cb: fw buffer " - "still full"); - skb_queue_head(&wl->tx_queue, skb); - return; - } else if (ret < 0) { - dev_kfree_skb(skb); - return; - } - } - - wl1251_debug(DEBUG_TX, "cb: waking queues"); - ieee80211_wake_queues(wl->hw); - wl->tx_queue_stopped = false; - } } /* Called upon reception of a TX complete interrupt */ void wl1251_tx_complete(struct wl1251 *wl) { - int i, result_index, num_complete = 0; + int i, result_index, num_complete = 0, queue_len; struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr; unsigned long flags; @@ -471,18 +457,22 @@ void wl1251_tx_complete(struct wl1251 *wl) } } - if (wl->tx_queue_stopped - && - skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK){ + queue_len = skb_queue_len(&wl->tx_queue); - /* firmware buffer has space, restart queues */ + if ((num_complete > 0) && (queue_len > 0)) { + /* firmware buffer has space, reschedule tx_work */ + wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work"); + ieee80211_queue_work(wl->hw, &wl->tx_work); + } + + if (wl->tx_queue_stopped && + queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) { + /* tx_queue has space, restart queues */ wl1251_debug(DEBUG_TX, "tx_complete: waking queues"); spin_lock_irqsave(&wl->wl_lock, flags); ieee80211_wake_queues(wl->hw); wl->tx_queue_stopped = false; spin_unlock_irqrestore(&wl->wl_lock, flags); - ieee80211_queue_work(wl->hw, &wl->tx_work); - } /* Every completed frame needs to be acknowledged */ diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h index c0ce2c8b43b8..bb23cd522b22 100644 --- a/drivers/net/wireless/wl1251/wl1251.h +++ b/drivers/net/wireless/wl1251/wl1251.h @@ -370,6 +370,8 @@ struct wl1251 { /* in dBm */ int power_level; + int rssi_thold; + struct wl1251_stats stats; struct wl1251_debugfs debugfs; @@ -410,6 +412,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl); #define WL1251_DEFAULT_CHANNEL 0 +#define WL1251_DEFAULT_BET_CONSECUTIVE 10 + #define CHIP_ID_1251_PG10 (0x7010101) #define CHIP_ID_1251_PG11 (0x7020101) #define CHIP_ID_1251_PG12 (0x7030101) @@ -431,4 +435,7 @@ void wl1251_disable_interrupts(struct wl1251 *wl); #define WL1251_PART_WORK_REG_START REGISTERS_BASE #define 
WL1251_PART_WORK_REG_SIZE REGISTERS_WORK_SIZE +#define WL1251_DEFAULT_LOW_RSSI_WEIGHT 10 +#define WL1251_DEFAULT_LOW_RSSI_DEPTH 10 + #endif diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/wl1251/wl12xx_80211.h index 184628027213..1417b1445c3d 100644 --- a/drivers/net/wireless/wl1251/wl12xx_80211.h +++ b/drivers/net/wireless/wl1251/wl12xx_80211.h @@ -54,7 +54,6 @@ /* This really should be 8, but not for our firmware */ #define MAX_SUPPORTED_RATES 32 -#define COUNTRY_STRING_LEN 3 #define MAX_COUNTRY_TRIPLETS 32 /* Headers */ @@ -98,7 +97,7 @@ struct country_triplet { struct wl12xx_ie_country { struct wl12xx_ie_header header; - u8 country_string[COUNTRY_STRING_LEN]; + u8 country_string[IEEE80211_COUNTRY_STRING_LEN]; struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; } __packed; diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig index 0e65bce457d6..692ebff38fc8 100644 --- a/drivers/net/wireless/wl12xx/Kconfig +++ b/drivers/net/wireless/wl12xx/Kconfig @@ -54,7 +54,7 @@ config WL12XX_SDIO config WL12XX_SDIO_TEST tristate "TI wl12xx SDIO testing support" - depends on WL12XX && MMC + depends on WL12XX && MMC && WL12XX_SDIO default n ---help--- This module adds support for the SDIO bus testing with the diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c index cc4068d2b4a8..a3db755ceeda 100644 --- a/drivers/net/wireless/wl12xx/acx.c +++ b/drivers/net/wireless/wl12xx/acx.c @@ -751,10 +751,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats) return 0; } -int wl1271_acx_rate_policies(struct wl1271 *wl) +int wl1271_acx_sta_rate_policies(struct wl1271 *wl) { - struct acx_rate_policy *acx; - struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf; + struct acx_sta_rate_policy *acx; + struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf; int idx = 0; int ret = 0; @@ -783,6 +783,10 @@ int wl1271_acx_rate_policies(struct wl1271 *wl) acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT); + wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x", + acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates, + acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates); + ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of rate policies failed: %d", ret); @@ -794,6 +798,38 @@ out: return ret; } +int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, + u8 idx) +{ + struct acx_ap_rate_policy *acx; + int ret = 0; + + wl1271_debug(DEBUG_ACX, "acx ap rate policy"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates); + acx->rate_policy.short_retry_limit = c->short_retry_limit; + acx->rate_policy.long_retry_limit = c->long_retry_limit; + acx->rate_policy.aflags = c->aflags; + + acx->rate_policy_idx = cpu_to_le32(idx); + + ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("Setting of ap rate policy failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop) { @@ -915,9 +951,9 @@ out: return ret; } -int wl1271_acx_mem_cfg(struct wl1271 *wl) +int wl1271_acx_ap_mem_cfg(struct wl1271 *wl) { - struct wl1271_acx_config_memory *mem_conf; + struct wl1271_acx_ap_config_memory *mem_conf; int ret; wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); @@ -929,10 +965,10 @@ int 
wl1271_acx_mem_cfg(struct wl1271 *wl) } /* memory config */ - mem_conf->num_stations = DEFAULT_NUM_STATIONS; - mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS; - mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS; - mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES; + mem_conf->num_stations = wl->conf.mem.num_stations; + mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num; + mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num; + mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles; mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, @@ -947,13 +983,45 @@ out: return ret; } -int wl1271_acx_init_mem_config(struct wl1271 *wl) +int wl1271_acx_sta_mem_cfg(struct wl1271 *wl) { + struct wl1271_acx_sta_config_memory *mem_conf; int ret; - ret = wl1271_acx_mem_cfg(wl); - if (ret < 0) - return ret; + wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); + + mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL); + if (!mem_conf) { + ret = -ENOMEM; + goto out; + } + + /* memory config */ + mem_conf->num_stations = wl->conf.mem.num_stations; + mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num; + mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num; + mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles; + mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); + mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory; + mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks; + mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks; + mem_conf->tx_min = wl->conf.mem.tx_min; + + ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, + sizeof(*mem_conf)); + if (ret < 0) { + wl1271_warning("wl1271 mem config failed: %d", ret); + goto out; + } + +out: + kfree(mem_conf); + return ret; +} + +int wl1271_acx_init_mem_config(struct wl1271 *wl) +{ + int ret; wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), GFP_KERNEL); @@ -1233,6 +1301,7 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, struct wl1271_acx_ht_capabilities *acx; u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; int ret = 0; + u32 ht_capabilites = 0; wl1271_debug(DEBUG_ACX, "acx ht capabilities setting"); @@ -1244,27 +1313,26 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, /* Allow HT Operation ? 
*/ if (allow_ht_operation) { - acx->ht_capabilites = + ht_capabilites = WL1271_ACX_FW_CAP_HT_OPERATION; if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD) - acx->ht_capabilites |= + ht_capabilites |= WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT; if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) - acx->ht_capabilites |= + ht_capabilites |= WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS; if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT) - acx->ht_capabilites |= + ht_capabilites |= WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION; /* get data from A-MPDU parameters field */ acx->ampdu_max_length = ht_cap->ampdu_factor; acx->ampdu_min_spacing = ht_cap->ampdu_density; - - memcpy(acx->mac_address, mac_address, ETH_ALEN); - } else { /* HT operations are not allowed */ - acx->ht_capabilites = 0; } + memcpy(acx->mac_address, mac_address, ETH_ALEN); + acx->ht_capabilites = cpu_to_le32(ht_capabilites); + ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx ht capabilities setting failed: %d", ret); @@ -1293,7 +1361,8 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl, acx->ht_protection = (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); acx->rifs_mode = 0; - acx->gf_protection = 0; + acx->gf_protection = + !!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); acx->ht_tx_burst_limit = 0; acx->dual_cts_protection = 0; @@ -1309,6 +1378,91 @@ out: return ret; } +/* Configure BA session initiator/receiver parameters setting in the FW. */ +int wl1271_acx_set_ba_session(struct wl1271 *wl, + enum ieee80211_back_parties direction, + u8 tid_index, u8 policy) +{ + struct wl1271_acx_ba_session_policy *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx ba session setting"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + /* ANY role */ + acx->role_id = 0xff; + acx->tid = tid_index; + acx->enable = policy; + acx->ba_direction = direction; + + switch (direction) { + case WLAN_BACK_INITIATOR: + acx->win_size = wl->conf.ht.tx_ba_win_size; + acx->inactivity_timeout = wl->conf.ht.inactivity_timeout; + break; + case WLAN_BACK_RECIPIENT: + acx->win_size = RX_BA_WIN_SIZE; + acx->inactivity_timeout = 0; + break; + default: + wl1271_error("Incorrect acx command id=%x\n", direction); + ret = -EINVAL; + goto out; + } + + ret = wl1271_cmd_configure(wl, + ACX_BA_SESSION_POLICY_CFG, + acx, + sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx ba session setting failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +/* setup BA session receiver setting in the FW. 
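A hypothetical caller sketch, not taken from this patch: a mac80211 ampdu_action handler would typically drive the receiver-setup helper defined just below along these lines (everything except the ACX helper itself is made up for illustration).

static int example_ampdu_action(struct wl1271 *wl,
				enum ieee80211_ampdu_mlme_action action,
				u8 tid, u16 ssn)
{
	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		return wl1271_acx_set_ba_receiver_session(wl, tid, ssn, true);
	case IEEE80211_AMPDU_RX_STOP:
		return wl1271_acx_set_ba_receiver_session(wl, tid, 0, false);
	default:
		/* TX-side aggregation is negotiated through a separate path */
		return -EOPNOTSUPP;
	}
}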
*/ +int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn, + bool enable) +{ + struct wl1271_acx_ba_receiver_setup *acx; + int ret; + + wl1271_debug(DEBUG_ACX, "acx ba receiver session setting"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) { + ret = -ENOMEM; + goto out; + } + + /* Single link for now */ + acx->link_id = 1; + acx->tid = tid_index; + acx->enable = enable; + acx->win_size = 0; + acx->ssn = ssn; + + ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx, + sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx ba receiver session failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime) { struct wl1271_acx_fw_tsf_information *tsf_info; @@ -1334,3 +1488,82 @@ out: kfree(tsf_info); return ret; } + +int wl1271_acx_max_tx_retry(struct wl1271 *wl) +{ + struct wl1271_acx_max_tx_retry *acx = NULL; + int ret; + + wl1271_debug(DEBUG_ACX, "acx max tx retry"); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) + return -ENOMEM; + + acx->max_tx_retry = cpu_to_le16(wl->conf.tx.ap_max_tx_retries); + + ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx max tx retry failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} + +int wl1271_acx_config_ps(struct wl1271 *wl) +{ + struct wl1271_acx_config_ps *config_ps; + int ret; + + wl1271_debug(DEBUG_ACX, "acx config ps"); + + config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL); + if (!config_ps) { + ret = -ENOMEM; + goto out; + } + + config_ps->exit_retries = wl->conf.conn.psm_exit_retries; + config_ps->enter_retries = wl->conf.conn.psm_entry_retries; + config_ps->null_data_rate = cpu_to_le32(wl->basic_rate); + + ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps, + sizeof(*config_ps)); + + if (ret < 0) { + wl1271_warning("acx config ps failed: %d", ret); + goto out; + } + +out: + kfree(config_ps); + return ret; +} + +int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr) +{ + struct wl1271_acx_inconnection_sta *acx = NULL; + int ret; + + wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr); + + acx = kzalloc(sizeof(*acx), GFP_KERNEL); + if (!acx) + return -ENOMEM; + + memcpy(acx->addr, addr, ETH_ALEN); + + ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST, + acx, sizeof(*acx)); + if (ret < 0) { + wl1271_warning("acx set inconnaction sta failed: %d", ret); + goto out; + } + +out: + kfree(acx); + return ret; +} diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h index 7bd8e4db4a71..dd19b01d807b 100644 --- a/drivers/net/wireless/wl12xx/acx.h +++ b/drivers/net/wireless/wl12xx/acx.h @@ -133,7 +133,6 @@ enum { #define DEFAULT_UCAST_PRIORITY 0 #define DEFAULT_RX_Q_PRIORITY 0 -#define DEFAULT_NUM_STATIONS 1 #define DEFAULT_RXQ_PRIORITY 0 /* low 0 .. 
15 high */ #define DEFAULT_RXQ_TYPE 0x07 /* All frames, Data/Ctrl/Mgmt */ #define TRACE_BUFFER_MAX_SIZE 256 @@ -747,13 +746,23 @@ struct acx_rate_class { #define ACX_TX_BASIC_RATE 0 #define ACX_TX_AP_FULL_RATE 1 #define ACX_TX_RATE_POLICY_CNT 2 -struct acx_rate_policy { +struct acx_sta_rate_policy { struct acx_header header; __le32 rate_class_cnt; struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES]; } __packed; + +#define ACX_TX_AP_MODE_MGMT_RATE 4 +#define ACX_TX_AP_MODE_BCST_RATE 5 +struct acx_ap_rate_policy { + struct acx_header header; + + __le32 rate_policy_idx; + struct acx_rate_class rate_policy; +} __packed; + struct acx_ac_cfg { struct acx_header header; u8 ac; @@ -787,12 +796,9 @@ struct acx_tx_config_options { __le16 tx_compl_threshold; /* number of packets */ } __packed; -#define ACX_RX_MEM_BLOCKS 70 -#define ACX_TX_MIN_MEM_BLOCKS 40 #define ACX_TX_DESCRIPTORS 32 -#define ACX_NUM_SSID_PROFILES 1 -struct wl1271_acx_config_memory { +struct wl1271_acx_ap_config_memory { struct acx_header header; u8 rx_mem_block_num; @@ -802,6 +808,20 @@ struct wl1271_acx_config_memory { __le32 total_tx_descriptors; } __packed; +struct wl1271_acx_sta_config_memory { + struct acx_header header; + + u8 rx_mem_block_num; + u8 tx_min_mem_block_num; + u8 num_stations; + u8 num_ssid_profiles; + __le32 total_tx_descriptors; + u8 dyn_mem_enable; + u8 tx_free_req; + u8 rx_free_req; + u8 tx_min; +} __packed; + struct wl1271_acx_mem_map { struct acx_header header; @@ -1051,6 +1071,59 @@ struct wl1271_acx_ht_information { u8 padding[3]; } __packed; +#define RX_BA_WIN_SIZE 8 + +struct wl1271_acx_ba_session_policy { + struct acx_header header; + /* + * Specifies role Id, Range 0-7, 0xFF means ANY role. + * Future use. For now this field is irrelevant + */ + u8 role_id; + /* + * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id. + * Not applicable if Role Id is set to ANY. + */ + u8 link_id; + + u8 tid; + + u8 enable; + + /* Windows size in number of packets */ + u16 win_size; + + /* + * As initiator inactivity timeout in time units(TU) of 1024us. + * As receiver reserved + */ + u16 inactivity_timeout; + + /* Initiator = 1/Receiver = 0 */ + u8 ba_direction; + + u8 padding[3]; +} __packed; + +struct wl1271_acx_ba_receiver_setup { + struct acx_header header; + + /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */ + u8 link_id; + + u8 tid; + + u8 enable; + + u8 padding[1]; + + /* Windows size in number of packets */ + u16 win_size; + + /* BA session starting sequence number. RANGE 0-FFF */ + u16 ssn; +} __packed; + struct wl1271_acx_fw_tsf_information { struct acx_header header; @@ -1062,6 +1135,33 @@ struct wl1271_acx_fw_tsf_information { u8 padding[3]; } __packed; +struct wl1271_acx_max_tx_retry { + struct acx_header header; + + /* + * the number of frames transmission failures before + * issuing the aging event. 
+ */ + __le16 max_tx_retry; + u8 padding_1[2]; +} __packed; + +struct wl1271_acx_config_ps { + struct acx_header header; + + u8 exit_retries; + u8 enter_retries; + u8 padding[2]; + __le32 null_data_rate; +} __packed; + +struct wl1271_acx_inconnection_sta { + struct acx_header header; + + u8 addr[ETH_ALEN]; + u8 padding1[2]; +} __packed; + enum { ACX_WAKE_UP_CONDITIONS = 0x0002, ACX_MEM_CFG = 0x0003, @@ -1113,22 +1213,24 @@ enum { ACX_RSSI_SNR_WEIGHTS = 0x0052, ACX_KEEP_ALIVE_MODE = 0x0053, ACX_SET_KEEP_ALIVE_CONFIG = 0x0054, - ACX_BA_SESSION_RESPONDER_POLICY = 0x0055, - ACX_BA_SESSION_INITIATOR_POLICY = 0x0056, + ACX_BA_SESSION_POLICY_CFG = 0x0055, + ACX_BA_SESSION_RX_SETUP = 0x0056, ACX_PEER_HT_CAP = 0x0057, ACX_HT_BSS_OPERATION = 0x0058, ACX_COEX_ACTIVITY = 0x0059, ACX_SET_DCO_ITRIM_PARAMS = 0x0061, + ACX_GEN_FW_CMD = 0x0070, + ACX_HOST_IF_CFG_BITMAP = 0x0071, + ACX_MAX_TX_FAILURE = 0x0072, + ACX_UPDATE_INCONNECTION_STA_LIST = 0x0073, DOT11_RX_MSDU_LIFE_TIME = 0x1004, DOT11_CUR_TX_PWR = 0x100D, DOT11_RX_DOT11_MODE = 0x1012, DOT11_RTS_THRESHOLD = 0x1013, DOT11_GROUP_ADDRESS_TBL = 0x1014, ACX_PM_CONFIG = 0x1016, - - MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL, - - MAX_IE = 0xFFFF + ACX_CONFIG_PS = 0x1017, + ACX_CONFIG_HANGOVER = 0x1018, }; @@ -1160,7 +1262,9 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); int wl1271_acx_cts_protect(struct wl1271 *wl, enum acx_ctsprotect_type ctsprotect); int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); -int wl1271_acx_rate_policies(struct wl1271 *wl); +int wl1271_acx_sta_rate_policies(struct wl1271 *wl); +int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, + u8 idx); int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop); int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, @@ -1168,7 +1272,8 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, u32 apsd_conf0, u32 apsd_conf1); int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold); int wl1271_acx_tx_config_options(struct wl1271 *wl); -int wl1271_acx_mem_cfg(struct wl1271 *wl); +int wl1271_acx_ap_mem_cfg(struct wl1271 *wl); +int wl1271_acx_sta_mem_cfg(struct wl1271 *wl); int wl1271_acx_init_mem_config(struct wl1271 *wl); int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); int wl1271_acx_smart_reflex(struct wl1271 *wl); @@ -1185,6 +1290,14 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, bool allow_ht_operation); int wl1271_acx_set_ht_information(struct wl1271 *wl, u16 ht_operation_mode); +int wl1271_acx_set_ba_session(struct wl1271 *wl, + enum ieee80211_back_parties direction, + u8 tid_index, u8 policy); +int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn, + bool enable); int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); +int wl1271_acx_max_tx_retry(struct wl1271 *wl); +int wl1271_acx_config_ps(struct wl1271 *wl); +int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr); #endif /* __WL1271_ACX_H__ */ diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c index 4df04f84d7f1..6934dffd5174 100644 --- a/drivers/net/wireless/wl12xx/boot.c +++ b/drivers/net/wireless/wl12xx/boot.c @@ -28,6 +28,7 @@ #include "boot.h" #include "io.h" #include "event.h" +#include "rx.h" static struct wl1271_partition_set part_table[PART_TABLE_LEN] = { [PART_DOWN] = { @@ -100,6 +101,22 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag) wl1271_write32(wl, 
ACX_REG_ECPU_CONTROL, cpu_ctrl); } +static void wl1271_parse_fw_ver(struct wl1271 *wl) +{ + int ret; + + ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u", + &wl->chip.fw_ver[0], &wl->chip.fw_ver[1], + &wl->chip.fw_ver[2], &wl->chip.fw_ver[3], + &wl->chip.fw_ver[4]); + + if (ret != 5) { + wl1271_warning("fw version incorrect value"); + memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver)); + return; + } +} + static void wl1271_boot_fw_version(struct wl1271 *wl) { struct wl1271_static_data static_data; @@ -107,11 +124,13 @@ static void wl1271_boot_fw_version(struct wl1271 *wl) wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data), false); - strncpy(wl->chip.fw_ver, static_data.fw_version, - sizeof(wl->chip.fw_ver)); + strncpy(wl->chip.fw_ver_str, static_data.fw_version, + sizeof(wl->chip.fw_ver_str)); /* make sure the string is NULL-terminated */ - wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0'; + wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0'; + + wl1271_parse_fw_ver(wl); } static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, @@ -231,7 +250,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) */ if (wl->nvs_len == sizeof(struct wl1271_nvs_file) || wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) { - if (wl->nvs->general_params.dual_mode_select) + /* for now 11a is unsupported in AP mode */ + if (wl->bss_type != BSS_TYPE_AP_BSS && + wl->nvs->general_params.dual_mode_select) wl->enable_11a = true; } @@ -431,6 +452,9 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl) PSPOLL_DELIVERY_FAILURE_EVENT_ID | SOFT_GEMINI_SENSE_EVENT_ID; + if (wl->bss_type == BSS_TYPE_AP_BSS) + wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID; + ret = wl1271_event_unmask(wl); if (ret < 0) { wl1271_error("EVENT mask setting failed"); @@ -464,6 +488,9 @@ static void wl1271_boot_hw_version(struct wl1271 *wl) fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET; wl->hw_pg_ver = (s8)fuse; + + if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3) + wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION; } /* uploads NVS and firmware */ @@ -595,8 +622,7 @@ int wl1271_boot(struct wl1271 *wl) wl1271_boot_enable_interrupts(wl); /* set the wl1271 default filters */ - wl->rx_config = WL1271_DEFAULT_RX_CONFIG; - wl->rx_filter = WL1271_DEFAULT_RX_FILTER; + wl1271_set_default_filters(wl); wl1271_event_mbox_config(wl); diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h index d67dcffa31eb..17229b86fc71 100644 --- a/drivers/net/wireless/wl12xx/boot.h +++ b/drivers/net/wireless/wl12xx/boot.h @@ -59,6 +59,11 @@ struct wl1271_static_data { #define PG_VER_MASK 0x3c #define PG_VER_OFFSET 2 +#define PG_MAJOR_VER_MASK 0x3 +#define PG_MAJOR_VER_OFFSET 0x0 +#define PG_MINOR_VER_MASK 0xc +#define PG_MINOR_VER_OFFSET 0x2 + #define CMD_MBOX_ADDRESS 0x407B4 #define POLARITY_LOW BIT(1) diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c index 0106628aa5a2..f0aa7ab97bf7 100644 --- a/drivers/net/wireless/wl12xx/cmd.c +++ b/drivers/net/wireless/wl12xx/cmd.c @@ -36,6 +36,7 @@ #include "wl12xx_80211.h" #include "cmd.h" #include "event.h" +#include "tx.h" #define WL1271_CMD_FAST_POLL_COUNT 50 @@ -62,6 +63,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, cmd->status = 0; WARN_ON(len % 4 != 0); + WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags)); wl1271_write(wl, wl->cmd_box_addr, buf, len, false); @@ -221,7 +223,7 @@ int wl1271_cmd_ext_radio_parms(struct wl1271 *wl) * Poll the mailbox event field until 
any of the bits in the mask is set or a * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) */ -static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) +static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask) { u32 events_vector, event; unsigned long timeout; @@ -230,7 +232,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) do { if (time_after(jiffies, timeout)) { - ieee80211_queue_work(wl->hw, &wl->recovery_work); + wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", + (int)mask); return -ETIMEDOUT; } @@ -248,6 +251,19 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) return 0; } +static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) +{ + int ret; + + ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask); + if (ret != 0) { + ieee80211_queue_work(wl->hw, &wl->recovery_work); + return ret; + } + + return 0; +} + int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) { struct wl1271_cmd_join *join; @@ -271,6 +287,7 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) join->rx_filter_options = cpu_to_le32(wl->rx_filter); join->bss_type = bss_type; join->basic_rate_set = cpu_to_le32(wl->basic_rate_set); + join->supported_rate_set = cpu_to_le32(wl->rate_set); if (wl->band == IEEE80211_BAND_5GHZ) join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ; @@ -288,6 +305,9 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) wl->tx_security_last_seq = 0; wl->tx_security_seq = 0; + wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x", + join->basic_rate_set, join->supported_rate_set); + ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0); if (ret < 0) { wl1271_error("failed to initiate cmd join"); @@ -439,7 +459,7 @@ out: return ret; } -int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send) +int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode) { struct wl1271_cmd_ps_params *ps_params = NULL; int ret = 0; @@ -453,10 +473,6 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send) } ps_params->ps_mode = ps_mode; - ps_params->send_null_data = send; - ps_params->retries = wl->conf.conn.psm_entry_nullfunc_retries; - ps_params->hang_over_period = wl->conf.conn.psm_entry_hangover_period; - ps_params->null_data_rate = cpu_to_le32(rates); ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, sizeof(*ps_params), 0); @@ -490,8 +506,8 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, cmd->len = cpu_to_le16(buf_len); cmd->template_type = template_id; cmd->enabled_rates = cpu_to_le32(rates); - cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit; - cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit; + cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit; + cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit; cmd->index = index; if (buf) @@ -659,15 +675,15 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr) /* llc layer */ memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header)); - tmpl.llc_type = htons(ETH_P_ARP); + tmpl.llc_type = cpu_to_be16(ETH_P_ARP); /* arp header */ arp_hdr = &tmpl.arp_hdr; - arp_hdr->ar_hrd = htons(ARPHRD_ETHER); - arp_hdr->ar_pro = htons(ETH_P_IP); + arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER); + arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP); arp_hdr->ar_hln = ETH_ALEN; arp_hdr->ar_pln = 4; - arp_hdr->ar_op = htons(ARPOP_REPLY); + arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY); /* arp payload */ memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN); @@ -702,9 +718,9 @@ int 
wl1271_build_qos_null_data(struct wl1271 *wl) wl->basic_rate); } -int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) +int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id) { - struct wl1271_cmd_set_keys *cmd; + struct wl1271_cmd_set_sta_keys *cmd; int ret = 0; wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id); @@ -731,11 +747,42 @@ out: return ret; } -int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, +int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id) +{ + struct wl1271_cmd_set_ap_keys *cmd; + int ret = 0; + + wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->hlid = WL1271_AP_BROADCAST_HLID; + cmd->key_id = id; + cmd->lid_key_type = WEP_DEFAULT_LID_TYPE; + cmd->key_action = cpu_to_le16(KEY_SET_ID); + cmd->key_type = KEY_WEP; + + ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret); + goto out; + } + +out: + kfree(cmd); + + return ret; +} + +int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, u8 key_size, const u8 *key, const u8 *addr, u32 tx_seq_32, u16 tx_seq_16) { - struct wl1271_cmd_set_keys *cmd; + struct wl1271_cmd_set_sta_keys *cmd; int ret = 0; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -788,6 +835,67 @@ out: return ret; } +int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16) +{ + struct wl1271_cmd_set_ap_keys *cmd; + int ret = 0; + u8 lid_type; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + if (hlid == WL1271_AP_BROADCAST_HLID) { + if (key_type == KEY_WEP) + lid_type = WEP_DEFAULT_LID_TYPE; + else + lid_type = BROADCAST_LID_TYPE; + } else { + lid_type = UNICAST_LID_TYPE; + } + + wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d" + " hlid: %d", (int)action, (int)id, (int)lid_type, + (int)key_type, (int)hlid); + + cmd->lid_key_type = lid_type; + cmd->hlid = hlid; + cmd->key_action = cpu_to_le16(action); + cmd->key_size = key_size; + cmd->key_type = key_type; + cmd->key_id = id; + cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16); + cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32); + + if (key_type == KEY_TKIP) { + /* + * We get the key in the following form: + * TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes) + * but the target is expecting: + * TKIP - RX MIC - TX MIC + */ + memcpy(cmd->key, key, 16); + memcpy(cmd->key + 16, key + 24, 8); + memcpy(cmd->key + 24, key + 16, 8); + } else { + memcpy(cmd->key, key, key_size); + } + + wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd)); + + ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_warning("could not set ap keys"); + goto out; + } + +out: + kfree(cmd); + return ret; +} + int wl1271_cmd_disconnect(struct wl1271 *wl) { struct wl1271_cmd_disconnect *cmd; @@ -850,3 +958,180 @@ out_free: out: return ret; } + +int wl1271_cmd_start_bss(struct wl1271 *wl) +{ + struct wl1271_cmd_bss_start *cmd; + struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd start bss"); + + /* + * FIXME: We currently do not support hidden SSID. The real SSID + * should be fetched from mac80211 first. 
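/*
 * [Editor's note -- illustration only, not part of this patch.]  The TKIP
 * byte-swap done in wl1271_cmd_set_ap_key() above is easy to misread; written
 * out as a helper it is simply: mac80211 supplies KEY(16) | TX MIC(8) |
 * RX MIC(8), while the firmware expects KEY(16) | RX MIC(8) | TX MIC(8).
 */
static void wl1271_tkip_key_to_fw(u8 *dst, const u8 *src)
{
	memcpy(dst, src, 16);          /* temporal key, unchanged   */
	memcpy(dst + 16, src + 24, 8); /* RX MIC moves to offset 16 */
	memcpy(dst + 24, src + 16, 8); /* TX MIC moves to offset 24 */
}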
+ */ + if (wl->ssid_len == 0) { + wl1271_warning("Hidden SSID currently not supported for AP"); + ret = -EINVAL; + goto out; + } + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN); + + cmd->aging_period = cpu_to_le16(WL1271_AP_DEF_INACTIV_SEC); + cmd->bss_index = WL1271_AP_BSS_INDEX; + cmd->global_hlid = WL1271_AP_GLOBAL_HLID; + cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID; + cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set); + cmd->beacon_interval = cpu_to_le16(wl->beacon_int); + cmd->dtim_interval = bss_conf->dtim_period; + cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP; + cmd->channel = wl->channel; + cmd->ssid_len = wl->ssid_len; + cmd->ssid_type = SSID_TYPE_PUBLIC; + memcpy(cmd->ssid, wl->ssid, wl->ssid_len); + + switch (wl->band) { + case IEEE80211_BAND_2GHZ: + cmd->band = RADIO_BAND_2_4GHZ; + break; + case IEEE80211_BAND_5GHZ: + cmd->band = RADIO_BAND_5GHZ; + break; + default: + wl1271_warning("bss start - unknown band: %d", (int)wl->band); + cmd->band = RADIO_BAND_2_4GHZ; + break; + } + + ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd start bss"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl1271_cmd_stop_bss(struct wl1271 *wl) +{ + struct wl1271_cmd_bss_start *cmd; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd stop bss"); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->bss_index = WL1271_AP_BSS_INDEX; + + ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd stop bss"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid) +{ + struct wl1271_cmd_add_sta *cmd; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + /* currently we don't support UAPSD */ + cmd->sp_len = 0; + + memcpy(cmd->addr, sta->addr, ETH_ALEN); + cmd->bss_index = WL1271_AP_BSS_INDEX; + cmd->aid = sta->aid; + cmd->hlid = hlid; + + /* + * FIXME: Does STA support QOS? We need to propagate this info from + * hostapd. Currently not that important since this is only used for + * sending the correct flavor of null-data packet in response to a + * trigger. + */ + cmd->wmm = 0; + + cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl, + sta->supp_rates[wl->band])); + + wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates); + + ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd add sta"); + goto out_free; + } + +out_free: + kfree(cmd); + +out: + return ret; +} + +int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid) +{ + struct wl1271_cmd_remove_sta *cmd; + int ret; + + wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid); + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + ret = -ENOMEM; + goto out; + } + + cmd->hlid = hlid; + /* We never send a deauth, mac80211 is in charge of this */ + cmd->reason_opcode = 0; + cmd->send_deauth_flag = 0; + + ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0); + if (ret < 0) { + wl1271_error("failed to initiate cmd remove sta"); + goto out_free; + } + + /* + * We are ok with a timeout here. 
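/*
 * [Editor's sketch -- not part of this patch.]  The mac80211 glue that calls
 * the new CMD_ADD_STA/CMD_REMOVE_STA commands is not in this excerpt.  A
 * .sta_add handler would be expected to look roughly like this;
 * wl1271_allocate_sta_hlid() is a hypothetical placeholder for however the
 * driver hands out host link ids (hlids) above WL1271_AP_STA_HLID_START.
 */
static int wl1271_op_sta_add_sketch(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta)
{
	struct wl1271 *wl = hw->priv;
	u8 hlid;
	int ret;

	mutex_lock(&wl->mutex);

	ret = wl1271_allocate_sta_hlid(wl, sta, &hlid); /* hypothetical */
	if (ret < 0)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_cmd_add_sta(wl, sta, hlid);
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return ret;
}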
The event is sometimes not sent + * due to a firmware bug. + */ + wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID); + +out_free: + kfree(cmd); + +out: + return ret; +} diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h index 2a1d9db7ceb8..54c12e71417e 100644 --- a/drivers/net/wireless/wl12xx/cmd.h +++ b/drivers/net/wireless/wl12xx/cmd.h @@ -39,7 +39,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); int wl1271_cmd_data_path(struct wl1271 *wl, bool enable); -int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, u32 rates, bool send); +int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode); int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, size_t len); int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, @@ -54,12 +54,20 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl, int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr); int wl1271_build_qos_null_data(struct wl1271 *wl); int wl1271_cmd_build_klv_null_data(struct wl1271 *wl); -int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); -int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, - u8 key_size, const u8 *key, const u8 *addr, - u32 tx_seq_32, u16 tx_seq_16); +int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id); +int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id); +int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, const u8 *addr, + u32 tx_seq_32, u16 tx_seq_16); +int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16); int wl1271_cmd_disconnect(struct wl1271 *wl); int wl1271_cmd_set_sta_state(struct wl1271 *wl); +int wl1271_cmd_start_bss(struct wl1271 *wl); +int wl1271_cmd_stop_bss(struct wl1271 *wl); +int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid); +int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid); enum wl1271_commands { CMD_INTERROGATE = 1, /*use this to read information elements*/ @@ -98,6 +106,12 @@ enum wl1271_commands { CMD_STOP_PERIODIC_SCAN = 51, CMD_SET_STA_STATE = 52, + /* AP mode commands */ + CMD_BSS_START = 60, + CMD_BSS_STOP = 61, + CMD_ADD_STA = 62, + CMD_REMOVE_STA = 63, + NUM_COMMANDS, MAX_COMMAND_ID = 0xFFFF, }; @@ -126,6 +140,14 @@ enum cmd_templ { * For CTS-to-self (FastCTS) mechanism * for BT/WLAN coexistence (SoftGemini). */ CMD_TEMPL_ARP_RSP, + CMD_TEMPL_LINK_MEASUREMENT_REPORT, + + /* AP-mode specific */ + CMD_TEMPL_AP_BEACON = 13, + CMD_TEMPL_AP_PROBE_RESPONSE, + CMD_TEMPL_AP_ARP_RSP, + CMD_TEMPL_DEAUTH_AP, + CMD_TEMPL_MAX = 0xff }; @@ -195,6 +217,7 @@ struct wl1271_cmd_join { * ACK or CTS frames). */ __le32 basic_rate_set; + __le32 supported_rate_set; u8 dtim_interval; /* * bits 0-2: This bitwise field specifies the type @@ -257,20 +280,11 @@ struct wl1271_cmd_ps_params { struct wl1271_cmd_header header; u8 ps_mode; /* STATION_* */ - u8 send_null_data; /* Do we have to send NULL data packet ? */ - u8 retries; /* Number of retires for the initial NULL data packet */ - - /* - * TUs during which the target stays awake after switching - * to power save mode. 
- */ - u8 hang_over_period; - __le32 null_data_rate; + u8 padding[3]; } __packed; /* HW encryption keys */ #define NUM_ACCESS_CATEGORIES_COPY 4 -#define MAX_KEY_SIZE 32 enum wl1271_cmd_key_action { KEY_ADD_OR_REPLACE = 1, @@ -289,7 +303,7 @@ enum wl1271_cmd_key_type { /* FIXME: Add description for key-types */ -struct wl1271_cmd_set_keys { +struct wl1271_cmd_set_sta_keys { struct wl1271_cmd_header header; /* Ignored for default WEP key */ @@ -318,6 +332,57 @@ struct wl1271_cmd_set_keys { __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; } __packed; +enum wl1271_cmd_lid_key_type { + UNICAST_LID_TYPE = 0, + BROADCAST_LID_TYPE = 1, + WEP_DEFAULT_LID_TYPE = 2 +}; + +struct wl1271_cmd_set_ap_keys { + struct wl1271_cmd_header header; + + /* + * Indicates whether the HLID is a unicast key set + * or broadcast key set. A special value 0xFF is + * used to indicate that the HLID is on WEP-default + * (multi-hlids). of type wl1271_cmd_lid_key_type. + */ + u8 hlid; + + /* + * In WEP-default network (hlid == 0xFF) used to + * indicate which network STA/IBSS/AP role should be + * changed + */ + u8 lid_key_type; + + /* + * Key ID - For TKIP and AES key types, this field + * indicates the value that should be inserted into + * the KeyID field of frames transmitted using this + * key entry. For broadcast keys the index use as a + * marker for TX/RX key. + * For WEP default network (HLID=0xFF), this field + * indicates the ID of the key to add or remove. + */ + u8 key_id; + u8 reserved_1; + + /* key_action_e */ + __le16 key_action; + + /* key size in bytes */ + u8 key_size; + + /* key_type_e */ + u8 key_type; + + /* This field holds the security key data to add to the STA table */ + u8 key[MAX_KEY_SIZE]; + __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; + __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; +} __packed; + struct wl1271_cmd_test_header { u8 id; u8 padding[3]; @@ -412,4 +477,68 @@ struct wl1271_cmd_set_sta_state { u8 padding[3]; } __packed; +enum wl1271_ssid_type { + SSID_TYPE_PUBLIC = 0, + SSID_TYPE_HIDDEN = 1 +}; + +struct wl1271_cmd_bss_start { + struct wl1271_cmd_header header; + + /* wl1271_ssid_type */ + u8 ssid_type; + u8 ssid_len; + u8 ssid[IW_ESSID_MAX_SIZE]; + u8 padding_1[2]; + + /* Basic rate set */ + __le32 basic_rate_set; + /* Aging period in seconds*/ + __le16 aging_period; + + /* + * This field specifies the time between target beacon + * transmission times (TBTTs), in time units (TUs). + * Valid values are 1 to 1024. 
+ */ + __le16 beacon_interval; + u8 bssid[ETH_ALEN]; + u8 bss_index; + /* Radio band */ + u8 band; + u8 channel; + /* The host link id for the AP's global queue */ + u8 global_hlid; + /* The host link id for the AP's broadcast queue */ + u8 broadcast_hlid; + /* DTIM count */ + u8 dtim_interval; + /* Beacon expiry time in ms */ + u8 beacon_expiry; + u8 padding_2[3]; +} __packed; + +struct wl1271_cmd_add_sta { + struct wl1271_cmd_header header; + + u8 addr[ETH_ALEN]; + u8 hlid; + u8 aid; + u8 psd_type[NUM_ACCESS_CATEGORIES_COPY]; + __le32 supported_rates; + u8 bss_index; + u8 sp_len; + u8 wmm; + u8 padding1; +} __packed; + +struct wl1271_cmd_remove_sta { + struct wl1271_cmd_header header; + + u8 hlid; + u8 reason_opcode; + u8 send_deauth_flag; + u8 padding1; +} __packed; + #endif /* __WL1271_CMD_H__ */ diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h index a16b3616e430..856a8a2fff4f 100644 --- a/drivers/net/wireless/wl12xx/conf.h +++ b/drivers/net/wireless/wl12xx/conf.h @@ -496,6 +496,26 @@ struct conf_rx_settings { CONF_HW_BIT_RATE_2MBPS) #define CONF_TX_RATE_RETRY_LIMIT 10 +/* + * Rates supported for data packets when operating as AP. Note the absense + * of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop + * one. The rate dropped is not mandatory under any operating mode. + */ +#define CONF_TX_AP_ENABLED_RATES (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \ + CONF_HW_BIT_RATE_6MBPS | CONF_HW_BIT_RATE_9MBPS | \ + CONF_HW_BIT_RATE_11MBPS | CONF_HW_BIT_RATE_12MBPS | \ + CONF_HW_BIT_RATE_18MBPS | CONF_HW_BIT_RATE_24MBPS | \ + CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ + CONF_HW_BIT_RATE_54MBPS) + +/* + * Default rates for management traffic when operating in AP mode. This + * should be configured according to the basic rate set of the AP + */ +#define CONF_TX_AP_DEFAULT_MGMT_RATES (CONF_HW_BIT_RATE_1MBPS | \ + CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS) + struct conf_tx_rate_class { /* @@ -636,9 +656,9 @@ struct conf_tx_settings { /* * Configuration for rate classes for TX (currently only one - * rate class supported.) + * rate class supported). Used in non-AP mode. */ - struct conf_tx_rate_class rc_conf; + struct conf_tx_rate_class sta_rc_conf; /* * Configuration for access categories for TX rate control. @@ -647,6 +667,28 @@ struct conf_tx_settings { struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT]; /* + * Configuration for rate classes in AP-mode. These rate classes + * are for the AC TX queues + */ + struct conf_tx_rate_class ap_rc_conf[CONF_TX_MAX_AC_COUNT]; + + /* + * Management TX rate class for AP-mode. + */ + struct conf_tx_rate_class ap_mgmt_conf; + + /* + * Broadcast TX rate class for AP-mode. + */ + struct conf_tx_rate_class ap_bcst_conf; + + /* + * AP-mode - allow this number of TX retries to a station before an + * event is triggered from FW. + */ + u16 ap_max_tx_retries; + + /* * Configuration for TID parameters. */ u8 tid_conf_count; @@ -687,6 +729,12 @@ struct conf_tx_settings { * Range: CONF_HW_BIT_RATE_* bit mask */ u32 basic_rate_5; + + /* + * TX retry limits for templates + */ + u8 tmpl_short_retry_limit; + u8 tmpl_long_retry_limit; }; enum { @@ -912,6 +960,14 @@ struct conf_conn_settings { u8 psm_entry_retries; /* + * Specifies the maximum number of times to try PSM exit if it fails + * (if sending the appropriate null-func message fails.) 
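/*
 * [Editor's note -- illustration only, not part of this patch.]  The comment
 * on CONF_TX_AP_ENABLED_RATES above cites a firmware limit of 12 rates per
 * rate class, which is why the 22Mbps rate is dropped (the remaining mask is
 * exactly 12 bits).  A quick way to sanity-check a rate mask against that
 * limit (the "12" is taken from the comment, not from a published constant):
 */
#include <linux/bitops.h>

static bool wl1271_rate_class_fits_fw(u32 enabled_rates)
{
	return hweight32(enabled_rates) <= 12;
}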
+ * + * Range 0 - 255 + */ + u8 psm_exit_retries; + + /* * Specifies the maximum number of times to try transmit the PSM entry * null-func frame for each PSM entry attempt * @@ -1036,30 +1092,30 @@ struct conf_scan_settings { /* * The minimum time to wait on each channel for active scans * - * Range: 0 - 65536 tu + * Range: u32 tu/1000 */ - u16 min_dwell_time_active; + u32 min_dwell_time_active; /* * The maximum time to wait on each channel for active scans * - * Range: 0 - 65536 tu + * Range: u32 tu/1000 */ - u16 max_dwell_time_active; + u32 max_dwell_time_active; /* - * The maximum time to wait on each channel for passive scans + * The minimum time to wait on each channel for passive scans * - * Range: 0 - 65536 tu + * Range: u32 tu/1000 */ - u16 min_dwell_time_passive; + u32 min_dwell_time_passive; /* * The maximum time to wait on each channel for passive scans * - * Range: 0 - 65536 tu + * Range: u32 tu/1000 */ - u16 max_dwell_time_passive; + u32 max_dwell_time_passive; /* * Number of probe requests to transmit on each active scan channel @@ -1090,6 +1146,51 @@ struct conf_rf_settings { u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5]; }; +struct conf_ht_setting { + u16 tx_ba_win_size; + u16 inactivity_timeout; +}; + +struct conf_memory_settings { + /* Number of stations supported in IBSS mode */ + u8 num_stations; + + /* Number of ssid profiles used in IBSS mode */ + u8 ssid_profiles; + + /* Number of memory buffers allocated to rx pool */ + u8 rx_block_num; + + /* Minimum number of blocks allocated to tx pool */ + u8 tx_min_block_num; + + /* Disable/Enable dynamic memory */ + u8 dynamic_memory; + + /* + * Minimum required free tx memory blocks in order to assure optimum + * performence + * + * Range: 0-120 + */ + u8 min_req_tx_blocks; + + /* + * Minimum required free rx memory blocks in order to assure optimum + * performence + * + * Range: 0-120 + */ + u8 min_req_rx_blocks; + + /* + * Minimum number of mem blocks (free+used) guaranteed for TX + * + * Range: 0-120 + */ + u8 tx_min; +}; + struct conf_drv_settings { struct conf_sg_settings sg; struct conf_rx_settings rx; @@ -1100,6 +1201,8 @@ struct conf_drv_settings { struct conf_roam_trigger_settings roam_trigger; struct conf_scan_settings scan; struct conf_rf_settings rf; + struct conf_ht_setting ht; + struct conf_memory_settings mem; }; #endif diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c index ec6077760157..8e75b09723b9 100644 --- a/drivers/net/wireless/wl12xx/debugfs.c +++ b/drivers/net/wireless/wl12xx/debugfs.c @@ -99,7 +99,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl) mutex_lock(&wl->mutex); - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -261,27 +261,25 @@ static ssize_t gpio_power_write(struct file *file, unsigned long value; int ret; - mutex_lock(&wl->mutex); - len = min(count, sizeof(buf) - 1); if (copy_from_user(buf, user_buf, len)) { - ret = -EFAULT; - goto out; + return -EFAULT; } buf[len] = '\0'; ret = strict_strtoul(buf, 0, &value); if (ret < 0) { wl1271_warning("illegal value in gpio_power"); - goto out; + return -EINVAL; } + mutex_lock(&wl->mutex); + if (value) wl1271_power_on(wl); else wl1271_power_off(wl); -out: mutex_unlock(&wl->mutex); return count; } @@ -293,12 +291,13 @@ static const struct file_operations gpio_power_ops = { .llseek = default_llseek, }; -static int wl1271_debugfs_add_files(struct wl1271 *wl) +static int wl1271_debugfs_add_files(struct wl1271 *wl, + struct 
dentry *rootdir) { int ret = 0; struct dentry *entry, *stats; - stats = debugfs_create_dir("fw-statistics", wl->rootdir); + stats = debugfs_create_dir("fw-statistics", rootdir); if (!stats || IS_ERR(stats)) { entry = stats; goto err; @@ -395,16 +394,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl) DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data); DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data); - DEBUGFS_ADD(tx_queue_len, wl->rootdir); - DEBUGFS_ADD(retry_count, wl->rootdir); - DEBUGFS_ADD(excessive_retries, wl->rootdir); - - DEBUGFS_ADD(gpio_power, wl->rootdir); + DEBUGFS_ADD(tx_queue_len, rootdir); + DEBUGFS_ADD(retry_count, rootdir); + DEBUGFS_ADD(excessive_retries, rootdir); - entry = debugfs_create_x32("debug_level", 0600, wl->rootdir, - &wl12xx_debug_level); - if (!entry || IS_ERR(entry)) - goto err; + DEBUGFS_ADD(gpio_power, rootdir); return 0; @@ -419,7 +413,7 @@ err: void wl1271_debugfs_reset(struct wl1271 *wl) { - if (!wl->rootdir) + if (!wl->stats.fw_stats) return; memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats)); @@ -430,13 +424,13 @@ void wl1271_debugfs_reset(struct wl1271 *wl) int wl1271_debugfs_init(struct wl1271 *wl) { int ret; + struct dentry *rootdir; - wl->rootdir = debugfs_create_dir(KBUILD_MODNAME, - wl->hw->wiphy->debugfsdir); + rootdir = debugfs_create_dir(KBUILD_MODNAME, + wl->hw->wiphy->debugfsdir); - if (IS_ERR(wl->rootdir)) { - ret = PTR_ERR(wl->rootdir); - wl->rootdir = NULL; + if (IS_ERR(rootdir)) { + ret = PTR_ERR(rootdir); goto err; } @@ -450,7 +444,7 @@ int wl1271_debugfs_init(struct wl1271 *wl) wl->stats.fw_stats_update = jiffies; - ret = wl1271_debugfs_add_files(wl); + ret = wl1271_debugfs_add_files(wl, rootdir); if (ret < 0) goto err_file; @@ -462,8 +456,7 @@ err_file: wl->stats.fw_stats = NULL; err_fw: - debugfs_remove_recursive(wl->rootdir); - wl->rootdir = NULL; + debugfs_remove_recursive(rootdir); err: return ret; @@ -473,8 +466,4 @@ void wl1271_debugfs_exit(struct wl1271 *wl) { kfree(wl->stats.fw_stats); wl->stats.fw_stats = NULL; - - debugfs_remove_recursive(wl->rootdir); - wl->rootdir = NULL; - } diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c index f9146f5242fb..1b170c5cc595 100644 --- a/drivers/net/wireless/wl12xx/event.c +++ b/drivers/net/wireless/wl12xx/event.c @@ -135,20 +135,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl, /* go to extremely low power mode */ wl1271_ps_elp_sleep(wl); break; - case EVENT_EXIT_POWER_SAVE_FAIL: - wl1271_debug(DEBUG_PSM, "PSM exit failed"); - - if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { - wl->psm_entry_retry = 0; - break; - } - - /* make sure the firmware goes to active mode - the frame to - be sent next will indicate to the AP, that we are active. */ - ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, - wl->basic_rate, false); - break; - case EVENT_EXIT_POWER_SAVE_SUCCESS: default: break; } @@ -186,6 +172,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) int ret; u32 vector; bool beacon_loss = false; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); wl1271_event_mbox_dump(mbox); @@ -218,21 +205,21 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) * BSS_LOSE_EVENT, beacon loss has to be reported to the stack. 
* */ - if (vector & BSS_LOSE_EVENT_ID) { + if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) { wl1271_info("Beacon loss detected."); /* indicate to the stack, that beacons have been lost */ beacon_loss = true; } - if (vector & PS_REPORT_EVENT_ID) { + if ((vector & PS_REPORT_EVENT_ID) && !is_ap) { wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT"); ret = wl1271_event_ps_report(wl, mbox, &beacon_loss); if (ret < 0) return ret; } - if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) + if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap) wl1271_event_pspoll_delivery_fail(wl); if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) { diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h index 6cce0143adb5..0e80886f3031 100644 --- a/drivers/net/wireless/wl12xx/event.h +++ b/drivers/net/wireless/wl12xx/event.h @@ -59,6 +59,7 @@ enum { BSS_LOSE_EVENT_ID = BIT(18), REGAINED_BSS_EVENT_ID = BIT(19), ROAMING_TRIGGER_MAX_TX_RETRY_EVENT_ID = BIT(20), + STA_REMOVE_COMPLETE_EVENT_ID = BIT(21), /* AP */ SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23), SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), @@ -74,8 +75,6 @@ enum { enum { EVENT_ENTER_POWER_SAVE_FAIL = 0, EVENT_ENTER_POWER_SAVE_SUCCESS, - EVENT_EXIT_POWER_SAVE_FAIL, - EVENT_EXIT_POWER_SAVE_SUCCESS, }; struct event_debug_report { @@ -115,7 +114,12 @@ struct event_mailbox { u8 scheduled_scan_status; u8 ps_status; - u8 reserved_5[29]; + /* AP FW only */ + u8 hlid_removed; + __le16 sta_aging_status; + __le16 sta_tx_retry_exceeded; + + u8 reserved_5[24]; } __packed; int wl1271_event_unmask(struct wl1271 *wl); diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c index 785a5304bfc4..6072fe457135 100644 --- a/drivers/net/wireless/wl12xx/init.c +++ b/drivers/net/wireless/wl12xx/init.c @@ -30,27 +30,9 @@ #include "acx.h" #include "cmd.h" #include "reg.h" +#include "tx.h" -static int wl1271_init_hwenc_config(struct wl1271 *wl) -{ - int ret; - - ret = wl1271_acx_feature_cfg(wl); - if (ret < 0) { - wl1271_warning("couldn't set feature config"); - return ret; - } - - ret = wl1271_cmd_set_default_wep_key(wl, wl->default_key); - if (ret < 0) { - wl1271_warning("couldn't set default key"); - return ret; - } - - return 0; -} - -int wl1271_init_templates_config(struct wl1271 *wl) +int wl1271_sta_init_templates_config(struct wl1271 *wl) { int ret, i; @@ -118,6 +100,132 @@ int wl1271_init_templates_config(struct wl1271 *wl) return 0; } +static int wl1271_ap_init_deauth_template(struct wl1271 *wl) +{ + struct wl12xx_disconn_template *tmpl; + int ret; + + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); + if (!tmpl) { + ret = -ENOMEM; + goto out; + } + + tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_DEAUTH); + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, + tmpl, sizeof(*tmpl), 0, + wl1271_tx_min_rate_get(wl)); + +out: + kfree(tmpl); + return ret; +} + +static int wl1271_ap_init_null_template(struct wl1271 *wl) +{ + struct ieee80211_hdr_3addr *nullfunc; + int ret; + + nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL); + if (!nullfunc) { + ret = -ENOMEM; + goto out; + } + + nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC | + IEEE80211_FCTL_FROMDS); + + /* nullfunc->addr1 is filled by FW */ + + memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN); + memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN); + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc, + sizeof(*nullfunc), 0, + wl1271_tx_min_rate_get(wl)); + +out: + 
kfree(nullfunc); + return ret; +} + +static int wl1271_ap_init_qos_null_template(struct wl1271 *wl) +{ + struct ieee80211_qos_hdr *qosnull; + int ret; + + qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL); + if (!qosnull) { + ret = -ENOMEM; + goto out; + } + + qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_NULLFUNC | + IEEE80211_FCTL_FROMDS); + + /* qosnull->addr1 is filled by FW */ + + memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN); + memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN); + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull, + sizeof(*qosnull), 0, + wl1271_tx_min_rate_get(wl)); + +out: + kfree(qosnull); + return ret; +} + +static int wl1271_ap_init_templates_config(struct wl1271 *wl) +{ + int ret; + + /* + * Put very large empty placeholders for all templates. These + * reserve memory for later. + */ + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL, + sizeof + (struct wl12xx_probe_resp_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL, + sizeof + (struct wl12xx_beacon_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL, + sizeof + (struct wl12xx_disconn_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, + sizeof(struct wl12xx_null_data_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, + sizeof + (struct wl12xx_qos_null_data_template), + 0, WL1271_RATE_AUTOMATIC); + if (ret < 0) + return ret; + + return 0; +} + static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter) { int ret; @@ -145,10 +253,6 @@ int wl1271_init_phy_config(struct wl1271 *wl) if (ret < 0) return ret; - ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0); - if (ret < 0) - return ret; - ret = wl1271_acx_service_period_timeout(wl); if (ret < 0) return ret; @@ -213,11 +317,199 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl) return 0; } +static int wl1271_sta_hw_init(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_cmd_ext_radio_parms(wl); + if (ret < 0) + return ret; + + /* PS config */ + ret = wl1271_acx_config_ps(wl); + if (ret < 0) + return ret; + + ret = wl1271_sta_init_templates_config(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0); + if (ret < 0) + return ret; + + /* Initialize connection monitoring thresholds */ + ret = wl1271_acx_conn_monit_params(wl, false); + if (ret < 0) + return ret; + + /* Beacon filtering */ + ret = wl1271_init_beacon_filter(wl); + if (ret < 0) + return ret; + + /* Bluetooth WLAN coexistence */ + ret = wl1271_init_pta(wl); + if (ret < 0) + return ret; + + /* Beacons and broadcast settings */ + ret = wl1271_init_beacon_broadcast(wl); + if (ret < 0) + return ret; + + /* Configure for ELP power saving */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); + if (ret < 0) + return ret; + + /* Configure rssi/snr averaging weights */ + ret = wl1271_acx_rssi_snr_avg_weights(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_sta_rate_policies(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_sta_mem_cfg(wl); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl) +{ + int ret, i; + + ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key); + if (ret < 
0) { + wl1271_warning("couldn't set default key"); + return ret; + } + + /* disable all keep-alive templates */ + for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { + ret = wl1271_acx_keep_alive_config(wl, i, + ACX_KEEP_ALIVE_TPL_INVALID); + if (ret < 0) + return ret; + } + + /* disable the keep-alive feature */ + ret = wl1271_acx_keep_alive_mode(wl, false); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_ap_hw_init(struct wl1271 *wl) +{ + int ret, i; + + ret = wl1271_ap_init_templates_config(wl); + if (ret < 0) + return ret; + + /* Configure for power always on */ + ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); + if (ret < 0) + return ret; + + /* Configure initial TX rate classes */ + for (i = 0; i < wl->conf.tx.ac_conf_count; i++) { + ret = wl1271_acx_ap_rate_policy(wl, + &wl->conf.tx.ap_rc_conf[i], i); + if (ret < 0) + return ret; + } + + ret = wl1271_acx_ap_rate_policy(wl, + &wl->conf.tx.ap_mgmt_conf, + ACX_TX_AP_MODE_MGMT_RATE); + if (ret < 0) + return ret; + + ret = wl1271_acx_ap_rate_policy(wl, + &wl->conf.tx.ap_bcst_conf, + ACX_TX_AP_MODE_BCST_RATE); + if (ret < 0) + return ret; + + ret = wl1271_acx_max_tx_retry(wl); + if (ret < 0) + return ret; + + ret = wl1271_acx_ap_mem_cfg(wl); + if (ret < 0) + return ret; + + return 0; +} + +static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl) +{ + int ret; + + ret = wl1271_ap_init_deauth_template(wl); + if (ret < 0) + return ret; + + ret = wl1271_ap_init_null_template(wl); + if (ret < 0) + return ret; + + ret = wl1271_ap_init_qos_null_template(wl); + if (ret < 0) + return ret; + + return 0; +} + +static void wl1271_check_ba_support(struct wl1271 *wl) +{ + /* validate FW cose ver x.x.x.50-60.x */ + if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) && + (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) { + wl->ba_support = true; + return; + } + + wl->ba_support = false; +} + +static int wl1271_set_ba_policies(struct wl1271 *wl) +{ + u8 tid_index; + int ret = 0; + + /* Reset the BA RX indicators */ + wl->ba_rx_bitmap = 0; + + /* validate that FW support BA */ + wl1271_check_ba_support(wl); + + if (wl->ba_support) + /* 802.11n initiator BA session setting */ + for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT; + ++tid_index) { + ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR, + tid_index, true); + if (ret < 0) + break; + } + + return ret; +} + int wl1271_hw_init(struct wl1271 *wl) { struct conf_tx_ac_category *conf_ac; struct conf_tx_tid *conf_tid; int ret, i; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); ret = wl1271_cmd_general_parms(wl); if (ret < 0) @@ -227,12 +519,12 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) return ret; - ret = wl1271_cmd_ext_radio_parms(wl); - if (ret < 0) - return ret; + /* Mode specific init */ + if (is_ap) + ret = wl1271_ap_hw_init(wl); + else + ret = wl1271_sta_hw_init(wl); - /* Template settings */ - ret = wl1271_init_templates_config(wl); if (ret < 0) return ret; @@ -259,16 +551,6 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) goto out_free_memmap; - /* Initialize connection monitoring thresholds */ - ret = wl1271_acx_conn_monit_params(wl, false); - if (ret < 0) - goto out_free_memmap; - - /* Beacon filtering */ - ret = wl1271_init_beacon_filter(wl); - if (ret < 0) - goto out_free_memmap; - /* Configure TX patch complete interrupt behavior */ ret = wl1271_acx_tx_config_options(wl); if (ret < 0) @@ -279,21 +561,11 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) goto out_free_memmap; - /* Bluetooth WLAN coexistence */ - ret = 
wl1271_init_pta(wl); - if (ret < 0) - goto out_free_memmap; - /* Energy detection */ ret = wl1271_init_energy_detection(wl); if (ret < 0) goto out_free_memmap; - /* Beacons and boradcast settings */ - ret = wl1271_init_beacon_broadcast(wl); - if (ret < 0) - goto out_free_memmap; - /* Default fragmentation threshold */ ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); if (ret < 0) @@ -321,23 +593,13 @@ int wl1271_hw_init(struct wl1271 *wl) goto out_free_memmap; } - /* Configure TX rate classes */ - ret = wl1271_acx_rate_policies(wl); - if (ret < 0) - goto out_free_memmap; - /* Enable data path */ ret = wl1271_cmd_data_path(wl, 1); if (ret < 0) goto out_free_memmap; - /* Configure for ELP power saving */ - ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); - if (ret < 0) - goto out_free_memmap; - /* Configure HW encryption */ - ret = wl1271_init_hwenc_config(wl); + ret = wl1271_acx_feature_cfg(wl); if (ret < 0) goto out_free_memmap; @@ -346,21 +608,17 @@ int wl1271_hw_init(struct wl1271 *wl) if (ret < 0) goto out_free_memmap; - /* disable all keep-alive templates */ - for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { - ret = wl1271_acx_keep_alive_config(wl, i, - ACX_KEEP_ALIVE_TPL_INVALID); - if (ret < 0) - goto out_free_memmap; - } + /* Mode specific init - post mem init */ + if (is_ap) + ret = wl1271_ap_hw_init_post_mem(wl); + else + ret = wl1271_sta_hw_init_post_mem(wl); - /* disable the keep-alive feature */ - ret = wl1271_acx_keep_alive_mode(wl, false); if (ret < 0) goto out_free_memmap; - /* Configure rssi/snr averaging weights */ - ret = wl1271_acx_rssi_snr_avg_weights(wl); + /* Configure initiator BA sessions policies */ + ret = wl1271_set_ba_policies(wl); if (ret < 0) goto out_free_memmap; diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h index 7762421f8602..3a8bd3f426d2 100644 --- a/drivers/net/wireless/wl12xx/init.h +++ b/drivers/net/wireless/wl12xx/init.h @@ -27,7 +27,7 @@ #include "wl12xx.h" int wl1271_hw_init_power_auth(struct wl1271 *wl); -int wl1271_init_templates_config(struct wl1271 *wl); +int wl1271_sta_init_templates_config(struct wl1271 *wl); int wl1271_init_phy_config(struct wl1271 *wl); int wl1271_init_pta(struct wl1271 *wl); int wl1271_init_energy_detection(struct wl1271 *wl); diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h index 844b32b170bb..c1aac8292089 100644 --- a/drivers/net/wireless/wl12xx/io.h +++ b/drivers/net/wireless/wl12xx/io.h @@ -168,5 +168,6 @@ void wl1271_unregister_hw(struct wl1271 *wl); int wl1271_init_ieee80211(struct wl1271 *wl); struct ieee80211_hw *wl1271_alloc_hw(void); int wl1271_free_hw(struct wl1271 *wl); +irqreturn_t wl1271_irq(int irq, void *data); #endif diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c index 062247ef3ad2..8b3c8d196b03 100644 --- a/drivers/net/wireless/wl12xx/main.c +++ b/drivers/net/wireless/wl12xx/main.c @@ -116,11 +116,11 @@ static struct conf_drv_settings default_conf = { }, .tx = { .tx_energy_detection = 0, - .rc_conf = { + .sta_rc_conf = { .enabled_rates = 0, .short_retry_limit = 10, .long_retry_limit = 10, - .aflags = 0 + .aflags = 0, }, .ac_conf_count = 4, .ac_conf = { @@ -153,6 +153,45 @@ static struct conf_drv_settings default_conf = { .tx_op_limit = 1504, }, }, + .ap_rc_conf = { + [0] = { + .enabled_rates = CONF_TX_AP_ENABLED_RATES, + .short_retry_limit = 10, + .long_retry_limit = 10, + .aflags = 0, + }, + [1] = { + .enabled_rates = CONF_TX_AP_ENABLED_RATES, + .short_retry_limit = 10, + 
.long_retry_limit = 10, + .aflags = 0, + }, + [2] = { + .enabled_rates = CONF_TX_AP_ENABLED_RATES, + .short_retry_limit = 10, + .long_retry_limit = 10, + .aflags = 0, + }, + [3] = { + .enabled_rates = CONF_TX_AP_ENABLED_RATES, + .short_retry_limit = 10, + .long_retry_limit = 10, + .aflags = 0, + }, + }, + .ap_mgmt_conf = { + .enabled_rates = CONF_TX_AP_DEFAULT_MGMT_RATES, + .short_retry_limit = 10, + .long_retry_limit = 10, + .aflags = 0, + }, + .ap_bcst_conf = { + .enabled_rates = CONF_HW_BIT_RATE_1MBPS, + .short_retry_limit = 10, + .long_retry_limit = 10, + .aflags = 0, + }, + .ap_max_tx_retries = 100, .tid_conf_count = 4, .tid_conf = { [CONF_TX_AC_BE] = { @@ -193,6 +232,8 @@ static struct conf_drv_settings default_conf = { .tx_compl_threshold = 4, .basic_rate = CONF_HW_BIT_RATE_1MBPS, .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS, + .tmpl_short_retry_limit = 10, + .tmpl_long_retry_limit = 10, }, .conn = { .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, @@ -215,6 +256,7 @@ static struct conf_drv_settings default_conf = { .bet_enable = CONF_BET_MODE_ENABLE, .bet_max_consecutive = 10, .psm_entry_retries = 5, + .psm_exit_retries = 255, .psm_entry_nullfunc_retries = 3, .psm_entry_hangover_period = 1, .keep_alive_interval = 55000, @@ -233,13 +275,13 @@ static struct conf_drv_settings default_conf = { .avg_weight_rssi_beacon = 20, .avg_weight_rssi_data = 10, .avg_weight_snr_beacon = 20, - .avg_weight_snr_data = 10 + .avg_weight_snr_data = 10, }, .scan = { .min_dwell_time_active = 7500, .max_dwell_time_active = 30000, - .min_dwell_time_passive = 30000, - .max_dwell_time_passive = 60000, + .min_dwell_time_passive = 100000, + .max_dwell_time_passive = 100000, .num_probe_reqs = 2, }, .rf = { @@ -252,9 +294,24 @@ static struct conf_drv_settings default_conf = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }, }, + .ht = { + .tx_ba_win_size = 64, + .inactivity_timeout = 10000, + }, + .mem = { + .num_stations = 1, + .ssid_profiles = 1, + .rx_block_num = 70, + .tx_min_block_num = 40, + .dynamic_memory = 0, + .min_req_tx_blocks = 100, + .min_req_rx_blocks = 22, + .tx_min = 27, + } }; static void __wl1271_op_remove_interface(struct wl1271 *wl); +static void wl1271_free_ap_keys(struct wl1271 *wl); static void wl1271_device_release(struct device *dev) @@ -317,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what, if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) goto out; - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -393,7 +450,7 @@ static int wl1271_plt_init(struct wl1271 *wl) if (ret < 0) return ret; - ret = wl1271_init_templates_config(wl); + ret = wl1271_sta_init_templates_config(wl); if (ret < 0) return ret; @@ -425,6 +482,10 @@ static int wl1271_plt_init(struct wl1271 *wl) if (ret < 0) goto out_free_memmap; + ret = wl1271_acx_sta_mem_cfg(wl); + if (ret < 0) + goto out_free_memmap; + /* Default fragmentation threshold */ ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); if (ret < 0) @@ -476,14 +537,71 @@ static int wl1271_plt_init(struct wl1271 *wl) return ret; } +static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks) +{ + bool fw_ps; + + /* only regulate station links */ + if (hlid < WL1271_AP_STA_HLID_START) + return; + + fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); + + /* + * Wake up from high level PS if the STA is asleep with too little + * blocks in FW or if the STA is awake. 
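/*
 * [Editor's sketch -- not part of this patch.]  The new .mem defaults above
 * feed the wl1271_acx_sta_config_memory structure declared earlier in acx.h.
 * The acx.c body of wl1271_acx_sta_mem_cfg() is not in this excerpt, but by
 * analogy with the existing memory-config helper it presumably maps the
 * fields one to one, reusing the ACX_MEM_CFG id:
 */
int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
{
	struct wl1271_acx_sta_config_memory *mem_conf;
	int ret;

	mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
	if (!mem_conf)
		return -ENOMEM;

	mem_conf->num_stations = wl->conf.mem.num_stations;
	mem_conf->num_ssid_profiles = wl->conf.mem.ssid_profiles;
	mem_conf->rx_mem_block_num = wl->conf.mem.rx_block_num;
	mem_conf->tx_min_mem_block_num = wl->conf.mem.tx_min_block_num;
	mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
	mem_conf->dyn_mem_enable = wl->conf.mem.dynamic_memory;
	mem_conf->tx_free_req = wl->conf.mem.min_req_tx_blocks;
	mem_conf->rx_free_req = wl->conf.mem.min_req_rx_blocks;
	mem_conf->tx_min = wl->conf.mem.tx_min;

	ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
				   sizeof(*mem_conf));
	kfree(mem_conf);
	return ret;
}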
+ */ + if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS) + wl1271_ps_link_end(wl, hlid); + + /* Start high-level PS if the STA is asleep with enough blocks in FW */ + else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS) + wl1271_ps_link_start(wl, hlid, true); +} + +static void wl1271_irq_update_links_status(struct wl1271 *wl, + struct wl1271_fw_ap_status *status) +{ + u32 cur_fw_ps_map; + u8 hlid; + + cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap); + if (wl->ap_fw_ps_map != cur_fw_ps_map) { + wl1271_debug(DEBUG_PSM, + "link ps prev 0x%x cur 0x%x changed 0x%x", + wl->ap_fw_ps_map, cur_fw_ps_map, + wl->ap_fw_ps_map ^ cur_fw_ps_map); + + wl->ap_fw_ps_map = cur_fw_ps_map; + } + + for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) { + u8 cnt = status->tx_lnk_free_blks[hlid] - + wl->links[hlid].prev_freed_blks; + + wl->links[hlid].prev_freed_blks = + status->tx_lnk_free_blks[hlid]; + wl->links[hlid].allocated_blks -= cnt; + + wl1271_irq_ps_regulate_link(wl, hlid, + wl->links[hlid].allocated_blks); + } +} + static void wl1271_fw_status(struct wl1271 *wl, - struct wl1271_fw_status *status) + struct wl1271_fw_full_status *full_status) { + struct wl1271_fw_common_status *status = &full_status->common; struct timespec ts; u32 total = 0; int i; - wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); + if (wl->bss_type == BSS_TYPE_AP_BSS) + wl1271_raw_read(wl, FW_STATUS_ADDR, status, + sizeof(struct wl1271_fw_ap_status), false); + else + wl1271_raw_read(wl, FW_STATUS_ADDR, status, + sizeof(struct wl1271_fw_sta_status), false); wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " "drv_rx_counter = %d, tx_results_counter = %d)", @@ -507,22 +625,54 @@ static void wl1271_fw_status(struct wl1271 *wl, if (total) clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); + /* for AP update num of allocated TX blocks per link and ps status */ + if (wl->bss_type == BSS_TYPE_AP_BSS) + wl1271_irq_update_links_status(wl, &full_status->ap); + /* update the host-chipset time offset */ getnstimeofday(&ts); wl->time_offset = (timespec_to_ns(&ts) >> 10) - (s64)le32_to_cpu(status->fw_localtime); } -#define WL1271_IRQ_MAX_LOOPS 10 +static void wl1271_flush_deferred_work(struct wl1271 *wl) +{ + struct sk_buff *skb; + + /* Pass all received frames to the network stack */ + while ((skb = skb_dequeue(&wl->deferred_rx_queue))) + ieee80211_rx_ni(wl->hw, skb); + + /* Return sent skbs to the network stack */ + while ((skb = skb_dequeue(&wl->deferred_tx_queue))) + ieee80211_tx_status(wl->hw, skb); +} + +static void wl1271_netstack_work(struct work_struct *work) +{ + struct wl1271 *wl = + container_of(work, struct wl1271, netstack_work); + + do { + wl1271_flush_deferred_work(wl); + } while (skb_queue_len(&wl->deferred_rx_queue)); +} + +#define WL1271_IRQ_MAX_LOOPS 256 -static void wl1271_irq_work(struct work_struct *work) +irqreturn_t wl1271_irq(int irq, void *cookie) { int ret; u32 intr; int loopcount = WL1271_IRQ_MAX_LOOPS; + struct wl1271 *wl = (struct wl1271 *)cookie; + bool done = false; + unsigned int defer_count; unsigned long flags; - struct wl1271 *wl = - container_of(work, struct wl1271, irq_work); + + /* TX might be handled here, avoid redundant work */ + set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); + cancel_work_sync(&wl->tx_work); mutex_lock(&wl->mutex); @@ -531,26 +681,27 @@ static void wl1271_irq_work(struct work_struct *work) if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - ret = wl1271_ps_elp_wakeup(wl, true); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; - 
spin_lock_irqsave(&wl->wl_lock, flags); - while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) { - clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags); - spin_unlock_irqrestore(&wl->wl_lock, flags); - loopcount--; + while (!done && loopcount--) { + /* + * In order to avoid a race with the hardirq, clear the flag + * before acknowledging the chip. Since the mutex is held, + * wl1271_ps_elp_wakeup cannot be called concurrently. + */ + clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); + smp_mb__after_clear_bit(); wl1271_fw_status(wl, wl->fw_status); - intr = le32_to_cpu(wl->fw_status->intr); + intr = le32_to_cpu(wl->fw_status->common.intr); + intr &= WL1271_INTR_MASK; if (!intr) { - wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); - spin_lock_irqsave(&wl->wl_lock, flags); + done = true; continue; } - intr &= WL1271_INTR_MASK; - if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { wl1271_error("watchdog interrupt received! " "starting recovery."); @@ -560,25 +711,35 @@ static void wl1271_irq_work(struct work_struct *work) goto out; } - if (intr & WL1271_ACX_INTR_DATA) { + if (likely(intr & WL1271_ACX_INTR_DATA)) { wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); - /* check for tx results */ - if (wl->fw_status->tx_results_counter != - (wl->tx_results_count & 0xff)) - wl1271_tx_complete(wl); + wl1271_rx(wl, &wl->fw_status->common); /* Check if any tx blocks were freed */ + spin_lock_irqsave(&wl->wl_lock, flags); if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && wl->tx_queue_count) { + spin_unlock_irqrestore(&wl->wl_lock, flags); /* * In order to avoid starvation of the TX path, * call the work function directly. */ wl1271_tx_work_locked(wl); + } else { + spin_unlock_irqrestore(&wl->wl_lock, flags); } - wl1271_rx(wl, wl->fw_status); + /* check for tx results */ + if (wl->fw_status->common.tx_results_counter != + (wl->tx_results_count & 0xff)) + wl1271_tx_complete(wl); + + /* Make sure the deferred queues don't get too long */ + defer_count = skb_queue_len(&wl->deferred_tx_queue) + + skb_queue_len(&wl->deferred_rx_queue); + if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT) + wl1271_flush_deferred_work(wl); } if (intr & WL1271_ACX_INTR_EVENT_A) { @@ -597,28 +758,48 @@ static void wl1271_irq_work(struct work_struct *work) if (intr & WL1271_ACX_INTR_HW_AVAILABLE) wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); - - spin_lock_irqsave(&wl->wl_lock, flags); } - if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags)) - ieee80211_queue_work(wl->hw, &wl->irq_work); - else - clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); - spin_unlock_irqrestore(&wl->wl_lock, flags); - wl1271_ps_elp_sleep(wl); out: + spin_lock_irqsave(&wl->wl_lock, flags); + /* In case TX was not handled here, queue TX work */ + clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && + wl->tx_queue_count) + ieee80211_queue_work(wl->hw, &wl->tx_work); + spin_unlock_irqrestore(&wl->wl_lock, flags); + mutex_unlock(&wl->mutex); + + return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(wl1271_irq); static int wl1271_fetch_firmware(struct wl1271 *wl) { const struct firmware *fw; + const char *fw_name; int ret; - ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl)); + switch (wl->bss_type) { + case BSS_TYPE_AP_BSS: + fw_name = WL1271_AP_FW_NAME; + break; + case BSS_TYPE_IBSS: + case BSS_TYPE_STA_BSS: + fw_name = WL1271_FW_NAME; + break; + default: + wl1271_error("no compatible firmware for bss_type %d", + wl->bss_type); + return -EINVAL; + } + + wl1271_debug(DEBUG_BOOT, "booting firmware 
%s", fw_name); + + ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl)); if (ret < 0) { wl1271_error("could not get firmware: %d", ret); @@ -632,6 +813,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl) goto out; } + vfree(wl->fw); wl->fw_len = fw->size; wl->fw = vmalloc(wl->fw_len); @@ -642,7 +824,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl) } memcpy(wl->fw, fw->data, wl->fw_len); - + wl->fw_bss_type = wl->bss_type; ret = 0; out: @@ -778,7 +960,8 @@ static int wl1271_chip_wakeup(struct wl1271 *wl) goto out; } - if (wl->fw == NULL) { + /* Make sure the firmware type matches the BSS type */ + if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) { ret = wl1271_fetch_firmware(wl); if (ret < 0) goto out; @@ -811,6 +994,8 @@ int wl1271_plt_start(struct wl1271 *wl) goto out; } + wl->bss_type = BSS_TYPE_STA_BSS; + while (retries) { retries--; ret = wl1271_chip_wakeup(wl); @@ -827,11 +1012,10 @@ int wl1271_plt_start(struct wl1271 *wl) wl->state = WL1271_STATE_PLT; wl1271_notice("firmware booted in PLT mode (%s)", - wl->chip.fw_ver); + wl->chip.fw_ver_str); goto out; irq_disable: - wl1271_disable_interrupts(wl); mutex_unlock(&wl->mutex); /* Unlocking the mutex in the middle of handling is inherently unsafe. In this case we deem it safe to do, @@ -840,7 +1024,9 @@ irq_disable: work function will not do anything.) Also, any other possible concurrent operations will fail due to the current state, hence the wl1271 struct should be safe. */ - cancel_work_sync(&wl->irq_work); + wl1271_disable_interrupts(wl); + wl1271_flush_deferred_work(wl); + cancel_work_sync(&wl->netstack_work); mutex_lock(&wl->mutex); power_off: wl1271_power_off(wl); @@ -854,12 +1040,10 @@ out: return ret; } -int wl1271_plt_stop(struct wl1271 *wl) +int __wl1271_plt_stop(struct wl1271 *wl) { int ret = 0; - mutex_lock(&wl->mutex); - wl1271_notice("power down"); if (wl->state != WL1271_STATE_PLT) { @@ -869,70 +1053,46 @@ int wl1271_plt_stop(struct wl1271 *wl) goto out; } - wl1271_disable_interrupts(wl); wl1271_power_off(wl); wl->state = WL1271_STATE_OFF; wl->rx_counter = 0; -out: mutex_unlock(&wl->mutex); - - cancel_work_sync(&wl->irq_work); + wl1271_disable_interrupts(wl); + wl1271_flush_deferred_work(wl); + cancel_work_sync(&wl->netstack_work); cancel_work_sync(&wl->recovery_work); + mutex_lock(&wl->mutex); +out: + return ret; +} + +int wl1271_plt_stop(struct wl1271 *wl) +{ + int ret; + mutex_lock(&wl->mutex); + ret = __wl1271_plt_stop(wl); + mutex_unlock(&wl->mutex); return ret; } -static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct wl1271 *wl = hw->priv; - struct ieee80211_conf *conf = &hw->conf; - struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); - struct ieee80211_sta *sta = txinfo->control.sta; unsigned long flags; int q; + u8 hlid = 0; - /* - * peek into the rates configured in the STA entry. - * The rates set after connection stage, The first block only BG sets: - * the compare is for bit 0-16 of sta_rate_set. The second block add - * HT rates in case of HT supported. 
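/*
 * [Editor's note -- illustration only, not part of this patch.]  With the
 * driver now selecting between two firmware images at boot, both names are
 * normally also advertised to userspace firmware loaders (assuming
 * WL1271_AP_FW_NAME is defined next to WL1271_FW_NAME in wl12xx.h, as the
 * fetch path above implies):
 */
MODULE_FIRMWARE(WL1271_FW_NAME);
MODULE_FIRMWARE(WL1271_AP_FW_NAME);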
- */ - spin_lock_irqsave(&wl->wl_lock, flags); - if (sta && - (sta->supp_rates[conf->channel->band] != - (wl->sta_rate_set & HW_BG_RATES_MASK))) { - wl->sta_rate_set = sta->supp_rates[conf->channel->band]; - set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags); - } - -#ifdef CONFIG_WL12XX_HT - if (sta && - sta->ht_cap.ht_supported && - ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) != - sta->ht_cap.mcs.rx_mask[0])) { - /* Clean MCS bits before setting them */ - wl->sta_rate_set &= HW_BG_RATES_MASK; - wl->sta_rate_set |= - (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET); - set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags); - } -#endif - wl->tx_queue_count++; - spin_unlock_irqrestore(&wl->wl_lock, flags); - - /* queue the packet */ q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); - skb_queue_tail(&wl->tx_queue[q], skb); - /* - * The chip specific setup must run before the first TX packet - - * before that, the tx_work will not be initialized! - */ + if (wl->bss_type == BSS_TYPE_AP_BSS) + hlid = wl1271_tx_get_hlid(skb); - if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) - ieee80211_queue_work(wl->hw, &wl->tx_work); + spin_lock_irqsave(&wl->wl_lock, flags); + + wl->tx_queue_count++; /* * The workqueue is slow to process the tx_queue and we need stop @@ -940,14 +1100,28 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) */ if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) { wl1271_debug(DEBUG_TX, "op_tx: stopping queues"); - - spin_lock_irqsave(&wl->wl_lock, flags); ieee80211_stop_queues(wl->hw); set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags); - spin_unlock_irqrestore(&wl->wl_lock, flags); } - return NETDEV_TX_OK; + /* queue the packet */ + if (wl->bss_type == BSS_TYPE_AP_BSS) { + wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q); + skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); + } else { + skb_queue_tail(&wl->tx_queue[q], skb); + } + + /* + * The chip specific setup must run before the first TX packet - + * before that, the tx_work will not be initialized! + */ + + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && + !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags)) + ieee80211_queue_work(wl->hw, &wl->tx_work); + + spin_unlock_irqrestore(&wl->wl_lock, flags); } static struct notifier_block wl1271_dev_notifier = { @@ -967,6 +1141,9 @@ static int wl1271_op_start(struct ieee80211_hw *hw) * * The MAC address is first known when the corresponding interface * is added. That is where we will initialize the hardware. + * + * In addition, we currently have different firmwares for AP and managed + * operation. We will know which to boot according to interface type. */ return 0; @@ -1006,6 +1183,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, wl->bss_type = BSS_TYPE_IBSS; wl->set_bss_type = BSS_TYPE_STA_BSS; break; + case NL80211_IFTYPE_AP: + wl->bss_type = BSS_TYPE_AP_BSS; + break; default: ret = -EOPNOTSUPP; goto out; @@ -1038,7 +1218,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, break; irq_disable: - wl1271_disable_interrupts(wl); mutex_unlock(&wl->mutex); /* Unlocking the mutex in the middle of handling is inherently unsafe. In this case we deem it safe to do, @@ -1047,7 +1226,9 @@ irq_disable: work function will not do anything.) Also, any other possible concurrent operations will fail due to the current state, hence the wl1271 struct should be safe. 
*/ - cancel_work_sync(&wl->irq_work); + wl1271_disable_interrupts(wl); + wl1271_flush_deferred_work(wl); + cancel_work_sync(&wl->netstack_work); mutex_lock(&wl->mutex); power_off: wl1271_power_off(wl); @@ -1061,11 +1242,11 @@ power_off: wl->vif = vif; wl->state = WL1271_STATE_ON; - wl1271_info("firmware booted (%s)", wl->chip.fw_ver); + wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); /* update hw/fw version info in wiphy struct */ wiphy->hw_version = wl->chip.id; - strncpy(wiphy->fw_version, wl->chip.fw_ver, + strncpy(wiphy->fw_version, wl->chip.fw_ver_str, sizeof(wiphy->fw_version)); /* @@ -1113,12 +1294,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl) wl->state = WL1271_STATE_OFF; - wl1271_disable_interrupts(wl); - mutex_unlock(&wl->mutex); + wl1271_disable_interrupts(wl); + wl1271_flush_deferred_work(wl); cancel_delayed_work_sync(&wl->scan_complete_work); - cancel_work_sync(&wl->irq_work); + cancel_work_sync(&wl->netstack_work); cancel_work_sync(&wl->tx_work); cancel_delayed_work_sync(&wl->pspoll_work); cancel_delayed_work_sync(&wl->elp_work); @@ -1147,10 +1328,13 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl) wl->time_offset = 0; wl->session_counter = 0; wl->rate_set = CONF_TX_RATE_MASK_BASIC; - wl->sta_rate_set = 0; wl->flags = 0; wl->vif = NULL; wl->filters = 0; + wl1271_free_ap_keys(wl); + memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map)); + wl->ap_fw_ps_map = 0; + wl->ap_ps_map = 0; for (i = 0; i < NUM_TX_QUEUES; i++) wl->tx_blocks_freed[i] = 0; @@ -1186,8 +1370,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw, static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters) { - wl->rx_config = WL1271_DEFAULT_RX_CONFIG; - wl->rx_filter = WL1271_DEFAULT_RX_FILTER; + wl1271_set_default_filters(wl); /* combine requested filters with current filter config */ filters = wl->filters | filters; @@ -1322,25 +1505,7 @@ static void wl1271_set_band_rate(struct wl1271 *wl) wl->basic_rate_set = wl->conf.tx.basic_rate_5; } -static u32 wl1271_min_rate_get(struct wl1271 *wl) -{ - int i; - u32 rate = 0; - - if (!wl->basic_rate_set) { - WARN_ON(1); - wl->basic_rate_set = wl->conf.tx.basic_rate; - } - - for (i = 0; !rate; i++) { - if ((wl->basic_rate_set >> i) & 0x1) - rate = 1 << i; - } - - return rate; -} - -static int wl1271_handle_idle(struct wl1271 *wl, bool idle) +static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle) { int ret; @@ -1350,9 +1515,8 @@ static int wl1271_handle_idle(struct wl1271 *wl, bool idle) if (ret < 0) goto out; } - wl->rate_set = wl1271_min_rate_get(wl); - wl->sta_rate_set = 0; - ret = wl1271_acx_rate_policies(wl); + wl->rate_set = wl1271_tx_min_rate_get(wl); + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) goto out; ret = wl1271_acx_keep_alive_config( @@ -1381,14 +1545,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) struct wl1271 *wl = hw->priv; struct ieee80211_conf *conf = &hw->conf; int channel, ret = 0; + bool is_ap; channel = ieee80211_frequency_to_channel(conf->channel->center_freq); - wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s", + wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s" + " changed 0x%x", channel, conf->flags & IEEE80211_CONF_PS ? "on" : "off", conf->power_level, - conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use"); + conf->flags & IEEE80211_CONF_IDLE ? 
"idle" : "in use", + changed); /* * mac80211 will go to idle nearly immediately after transmitting some @@ -1406,7 +1573,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) goto out; } - ret = wl1271_ps_elp_wakeup(wl, false); + is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -1417,31 +1586,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) wl->band = conf->channel->band; wl->channel = channel; - /* - * FIXME: the mac80211 should really provide a fixed rate - * to use here. for now, just use the smallest possible rate - * for the band as a fixed rate for association frames and - * other control messages. - */ - if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) - wl1271_set_band_rate(wl); - - wl->basic_rate = wl1271_min_rate_get(wl); - ret = wl1271_acx_rate_policies(wl); - if (ret < 0) - wl1271_warning("rate policy for update channel " - "failed %d", ret); + if (!is_ap) { + /* + * FIXME: the mac80211 should really provide a fixed + * rate to use here. for now, just use the smallest + * possible rate for the band as a fixed rate for + * association frames and other control messages. + */ + if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) + wl1271_set_band_rate(wl); - if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) { - ret = wl1271_join(wl, false); + wl->basic_rate = wl1271_tx_min_rate_get(wl); + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) - wl1271_warning("cmd join to update channel " + wl1271_warning("rate policy for channel " "failed %d", ret); + + if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) { + ret = wl1271_join(wl, false); + if (ret < 0) + wl1271_warning("cmd join on channel " + "failed %d", ret); + } } } - if (changed & IEEE80211_CONF_CHANGE_IDLE) { - ret = wl1271_handle_idle(wl, conf->flags & IEEE80211_CONF_IDLE); + if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) { + ret = wl1271_sta_handle_idle(wl, + conf->flags & IEEE80211_CONF_IDLE); if (ret < 0) wl1271_warning("idle mode change failed %d", ret); } @@ -1548,7 +1720,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, struct wl1271 *wl = hw->priv; int ret; - wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter"); + wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" + " total %x", changed, *total); mutex_lock(&wl->mutex); @@ -1558,19 +1731,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw, if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; - - if (*total & FIF_ALLMULTI) - ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); - else if (fp) - ret = wl1271_acx_group_address_tbl(wl, fp->enabled, - fp->mc_list, - fp->mc_list_length); - if (ret < 0) - goto out_sleep; + if (wl->bss_type != BSS_TYPE_AP_BSS) { + if (*total & FIF_ALLMULTI) + ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); + else if (fp) + ret = wl1271_acx_group_address_tbl(wl, fp->enabled, + fp->mc_list, + fp->mc_list_length); + if (ret < 0) + goto out_sleep; + } /* determine, whether supported filter values have changed */ if (changed == 0) @@ -1593,38 +1767,192 @@ out: kfree(fp); } +static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type, + u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, + u16 tx_seq_16) +{ + struct wl1271_ap_key *ap_key; + int i; + + wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id); + + if (key_size > MAX_KEY_SIZE) + return -EINVAL; + + /* + * Find 
next free entry in ap_keys. Also check we are not replacing + * an existing key. + */ + for (i = 0; i < MAX_NUM_KEYS; i++) { + if (wl->recorded_ap_keys[i] == NULL) + break; + + if (wl->recorded_ap_keys[i]->id == id) { + wl1271_warning("trying to record key replacement"); + return -EINVAL; + } + } + + if (i == MAX_NUM_KEYS) + return -EBUSY; + + ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL); + if (!ap_key) + return -ENOMEM; + + ap_key->id = id; + ap_key->key_type = key_type; + ap_key->key_size = key_size; + memcpy(ap_key->key, key, key_size); + ap_key->hlid = hlid; + ap_key->tx_seq_32 = tx_seq_32; + ap_key->tx_seq_16 = tx_seq_16; + + wl->recorded_ap_keys[i] = ap_key; + return 0; +} + +static void wl1271_free_ap_keys(struct wl1271 *wl) +{ + int i; + + for (i = 0; i < MAX_NUM_KEYS; i++) { + kfree(wl->recorded_ap_keys[i]); + wl->recorded_ap_keys[i] = NULL; + } +} + +static int wl1271_ap_init_hwenc(struct wl1271 *wl) +{ + int i, ret = 0; + struct wl1271_ap_key *key; + bool wep_key_added = false; + + for (i = 0; i < MAX_NUM_KEYS; i++) { + if (wl->recorded_ap_keys[i] == NULL) + break; + + key = wl->recorded_ap_keys[i]; + ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE, + key->id, key->key_type, + key->key_size, key->key, + key->hlid, key->tx_seq_32, + key->tx_seq_16); + if (ret < 0) + goto out; + + if (key->key_type == KEY_WEP) + wep_key_added = true; + } + + if (wep_key_added) { + ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key); + if (ret < 0) + goto out; + } + +out: + wl1271_free_ap_keys(wl); + return ret; +} + +static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, + u8 key_size, const u8 *key, u32 tx_seq_32, + u16 tx_seq_16, struct ieee80211_sta *sta) +{ + int ret; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + + if (is_ap) { + struct wl1271_station *wl_sta; + u8 hlid; + + if (sta) { + wl_sta = (struct wl1271_station *)sta->drv_priv; + hlid = wl_sta->hlid; + } else { + hlid = WL1271_AP_BROADCAST_HLID; + } + + if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { + /* + * We do not support removing keys after AP shutdown. + * Pretend we do to make mac80211 happy. + */ + if (action != KEY_ADD_OR_REPLACE) + return 0; + + ret = wl1271_record_ap_key(wl, id, + key_type, key_size, + key, hlid, tx_seq_32, + tx_seq_16); + } else { + ret = wl1271_cmd_set_ap_key(wl, action, + id, key_type, key_size, + key, hlid, tx_seq_32, + tx_seq_16); + } + + if (ret < 0) + return ret; + } else { + const u8 *addr; + static const u8 bcast_addr[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff + }; + + addr = sta ? sta->addr : bcast_addr; + + if (is_zero_ether_addr(addr)) { + /* We dont support TX only encryption */ + return -EOPNOTSUPP; + } + + /* The wl1271 does not allow to remove unicast keys - they + will be cleared automatically on next CMD_JOIN. Ignore the + request silently, as we dont want the mac80211 to emit + an error message. 
*/ + if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr)) + return 0; + + ret = wl1271_cmd_set_sta_key(wl, action, + id, key_type, key_size, + key, addr, tx_seq_32, + tx_seq_16); + if (ret < 0) + return ret; + + /* the default WEP key needs to be configured at least once */ + if (key_type == KEY_WEP) { + ret = wl1271_cmd_set_sta_default_wep_key(wl, + wl->default_key); + if (ret < 0) + return ret; + } + } + + return 0; +} + static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key_conf) { struct wl1271 *wl = hw->priv; - const u8 *addr; int ret; u32 tx_seq_32 = 0; u16 tx_seq_16 = 0; u8 key_type; - static const u8 bcast_addr[ETH_ALEN] = - { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); - addr = sta ? sta->addr : bcast_addr; - - wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd); - wl1271_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN); + wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta); wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", key_conf->cipher, key_conf->keyidx, key_conf->keylen, key_conf->flags); wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); - if (is_zero_ether_addr(addr)) { - /* We dont support TX only encryption */ - ret = -EOPNOTSUPP; - goto out; - } - mutex_lock(&wl->mutex); if (unlikely(wl->state == WL1271_STATE_OFF)) { @@ -1632,7 +1960,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, goto out_unlock; } - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_unlock; @@ -1671,36 +1999,21 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, switch (cmd) { case SET_KEY: - ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE, - key_conf->keyidx, key_type, - key_conf->keylen, key_conf->key, - addr, tx_seq_32, tx_seq_16); + ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE, + key_conf->keyidx, key_type, + key_conf->keylen, key_conf->key, + tx_seq_32, tx_seq_16, sta); if (ret < 0) { wl1271_error("Could not add or replace key"); goto out_sleep; } - - /* the default WEP key needs to be configured at least once */ - if (key_type == KEY_WEP) { - ret = wl1271_cmd_set_default_wep_key(wl, - wl->default_key); - if (ret < 0) - goto out_sleep; - } break; case DISABLE_KEY: - /* The wl1271 does not allow to remove unicast keys - they - will be cleared automatically on next CMD_JOIN. Ignore the - request silently, as we dont want the mac80211 to emit - an error message. 
*/ - if (!is_broadcast_ether_addr(addr)) - break; - - ret = wl1271_cmd_set_key(wl, KEY_REMOVE, - key_conf->keyidx, key_type, - key_conf->keylen, key_conf->key, - addr, 0, 0); + ret = wl1271_set_key(wl, KEY_REMOVE, + key_conf->keyidx, key_type, + key_conf->keylen, key_conf->key, + 0, 0, sta); if (ret < 0) { wl1271_error("Could not remove key"); goto out_sleep; @@ -1719,7 +2032,6 @@ out_sleep: out_unlock: mutex_unlock(&wl->mutex); -out: return ret; } @@ -1751,7 +2063,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw, goto out; } - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -1777,7 +2089,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) goto out; } - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -1805,7 +2117,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) goto out; } - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -1821,7 +2133,7 @@ out: return ret; } -static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, +static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, int offset) { u8 *ptr = skb->data + offset; @@ -1831,89 +2143,213 @@ static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, if (ptr[0] == WLAN_EID_SSID) { wl->ssid_len = ptr[1]; memcpy(wl->ssid, ptr+2, wl->ssid_len); - return; + return 0; } ptr += (ptr[1] + 2); } + wl1271_error("No SSID in IEs!\n"); + return -ENOENT; } -static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, +static int wl1271_bss_erp_info_changed(struct wl1271 *wl, struct ieee80211_bss_conf *bss_conf, u32 changed) { - enum wl1271_cmd_ps_mode mode; - struct wl1271 *wl = hw->priv; - struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid); - bool do_join = false; - bool set_assoc = false; - int ret; + int ret = 0; - wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); + if (changed & BSS_CHANGED_ERP_SLOT) { + if (bss_conf->use_short_slot) + ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); + else + ret = wl1271_acx_slot(wl, SLOT_TIME_LONG); + if (ret < 0) { + wl1271_warning("Set slot time failed %d", ret); + goto out; + } + } - mutex_lock(&wl->mutex); + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + if (bss_conf->use_short_preamble) + wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); + else + wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG); + } - if (unlikely(wl->state == WL1271_STATE_OFF)) - goto out; + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + if (bss_conf->use_cts_prot) + ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE); + else + ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE); + if (ret < 0) { + wl1271_warning("Set ctsprotect failed %d", ret); + goto out; + } + } - ret = wl1271_ps_elp_wakeup(wl, false); - if (ret < 0) - goto out; +out: + return ret; +} + +static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + int ret = 0; - if ((changed & BSS_CHANGED_BEACON_INT) && - (wl->bss_type == BSS_TYPE_IBSS)) { - wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d", + if ((changed & BSS_CHANGED_BEACON_INT)) { + wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d", bss_conf->beacon_int); wl->beacon_int = bss_conf->beacon_int; - do_join = true; } - if ((changed & BSS_CHANGED_BEACON) && - (wl->bss_type 
== BSS_TYPE_IBSS)) { - struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); + if ((changed & BSS_CHANGED_BEACON)) { + struct ieee80211_hdr *hdr; + int ieoffset = offsetof(struct ieee80211_mgmt, + u.beacon.variable); + struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif); + u16 tmpl_id; - wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated"); + if (!beacon) + goto out; - if (beacon) { - struct ieee80211_hdr *hdr; - int ieoffset = offsetof(struct ieee80211_mgmt, - u.beacon.variable); + wl1271_debug(DEBUG_MASTER, "beacon updated"); - wl1271_ssid_set(wl, beacon, ieoffset); + ret = wl1271_ssid_set(wl, beacon, ieoffset); + if (ret < 0) { + dev_kfree_skb(beacon); + goto out; + } + tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON : + CMD_TEMPL_BEACON; + ret = wl1271_cmd_template_set(wl, tmpl_id, + beacon->data, + beacon->len, 0, + wl1271_tx_min_rate_get(wl)); + if (ret < 0) { + dev_kfree_skb(beacon); + goto out; + } - ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, - beacon->data, - beacon->len, 0, - wl1271_min_rate_get(wl)); + hdr = (struct ieee80211_hdr *) beacon->data; + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_PROBE_RESP); + + tmpl_id = is_ap ? CMD_TEMPL_AP_PROBE_RESPONSE : + CMD_TEMPL_PROBE_RESPONSE; + ret = wl1271_cmd_template_set(wl, + tmpl_id, + beacon->data, + beacon->len, 0, + wl1271_tx_min_rate_get(wl)); + dev_kfree_skb(beacon); + if (ret < 0) + goto out; + } - if (ret < 0) { - dev_kfree_skb(beacon); - goto out_sleep; - } +out: + return ret; +} - hdr = (struct ieee80211_hdr *) beacon->data; - hdr->frame_control = cpu_to_le16( - IEEE80211_FTYPE_MGMT | - IEEE80211_STYPE_PROBE_RESP); +/* AP mode changes */ +static void wl1271_bss_info_changed_ap(struct wl1271 *wl, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + int ret = 0; - ret = wl1271_cmd_template_set(wl, - CMD_TEMPL_PROBE_RESPONSE, - beacon->data, - beacon->len, 0, - wl1271_min_rate_get(wl)); - dev_kfree_skb(beacon); - if (ret < 0) - goto out_sleep; + if ((changed & BSS_CHANGED_BASIC_RATES)) { + u32 rates = bss_conf->basic_rates; + struct conf_tx_rate_class mgmt_rc; + + wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates); + wl->basic_rate = wl1271_tx_min_rate_get(wl); + wl1271_debug(DEBUG_AP, "basic rates: 0x%x", + wl->basic_rate_set); + + /* update the AP management rate policy with the new rates */ + mgmt_rc.enabled_rates = wl->basic_rate_set; + mgmt_rc.long_retry_limit = 10; + mgmt_rc.short_retry_limit = 10; + mgmt_rc.aflags = 0; + ret = wl1271_acx_ap_rate_policy(wl, &mgmt_rc, + ACX_TX_AP_MODE_MGMT_RATE); + if (ret < 0) { + wl1271_error("AP mgmt policy change failed %d", ret); + goto out; + } + } - /* Need to update the SSID (for filtering etc) */ - do_join = true; + ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed); + if (ret < 0) + goto out; + + if ((changed & BSS_CHANGED_BEACON_ENABLED)) { + if (bss_conf->enable_beacon) { + if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { + ret = wl1271_cmd_start_bss(wl); + if (ret < 0) + goto out; + + set_bit(WL1271_FLAG_AP_STARTED, &wl->flags); + wl1271_debug(DEBUG_AP, "started AP"); + + ret = wl1271_ap_init_hwenc(wl); + if (ret < 0) + goto out; + } + } else { + if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { + ret = wl1271_cmd_stop_bss(wl); + if (ret < 0) + goto out; + + clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags); + wl1271_debug(DEBUG_AP, "stopped AP"); + } } } - if ((changed & BSS_CHANGED_BEACON_ENABLED) && - (wl->bss_type == BSS_TYPE_IBSS)) { + ret = wl1271_bss_erp_info_changed(wl, 
bss_conf, changed); + if (ret < 0) + goto out; +out: + return; +} + +/* STA/IBSS mode changes */ +static void wl1271_bss_info_changed_sta(struct wl1271 *wl, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + bool do_join = false, set_assoc = false; + bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS); + u32 sta_rate_set = 0; + int ret; + struct ieee80211_sta *sta; + bool sta_exists = false; + struct ieee80211_sta_ht_cap sta_ht_cap; + + if (is_ibss) { + ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, + changed); + if (ret < 0) + goto out; + } + + if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss) + do_join = true; + + /* Need to update the SSID (for filtering etc) */ + if ((changed & BSS_CHANGED_BEACON) && is_ibss) + do_join = true; + + if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) { wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", bss_conf->enable_beacon ? "enabled" : "disabled"); @@ -1924,7 +2360,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, do_join = true; } - if (changed & BSS_CHANGED_CQM) { + if ((changed & BSS_CHANGED_CQM)) { bool enable = false; if (bss_conf->cqm_rssi_thold) enable = true; @@ -1942,24 +2378,70 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, * and enable the BSSID filter */ memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) { - memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); + memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); + if (!is_zero_ether_addr(wl->bssid)) { ret = wl1271_cmd_build_null_data(wl); if (ret < 0) - goto out_sleep; + goto out; ret = wl1271_build_qos_null_data(wl); if (ret < 0) - goto out_sleep; + goto out; /* filter out all packets not from this BSSID */ wl1271_configure_filters(wl, 0); /* Need to update the BSSID (for filtering etc) */ do_join = true; + } } - if (changed & BSS_CHANGED_ASSOC) { + rcu_read_lock(); + sta = ieee80211_find_sta(vif, bss_conf->bssid); + if (sta) { + /* save the supp_rates of the ap */ + sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band]; + if (sta->ht_cap.ht_supported) + sta_rate_set |= + (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET); + sta_ht_cap = sta->ht_cap; + sta_exists = true; + } + rcu_read_unlock(); + + if (sta_exists) { + /* handle new association with HT and HT information change */ + if ((changed & BSS_CHANGED_HT) && + (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { + ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap, + true); + if (ret < 0) { + wl1271_warning("Set ht cap true failed %d", + ret); + goto out; + } + ret = wl1271_acx_set_ht_information(wl, + bss_conf->ht_operation_mode); + if (ret < 0) { + wl1271_warning("Set ht information failed %d", + ret); + goto out; + } + } + /* handle new association without HT and disassociation */ + else if (changed & BSS_CHANGED_ASSOC) { + ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap, + false); + if (ret < 0) { + wl1271_warning("Set ht cap false failed %d", + ret); + goto out; + } + } + } + + if ((changed & BSS_CHANGED_ASSOC)) { if (bss_conf->assoc) { u32 rates; int ieoffset; @@ -1975,10 +2457,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, rates = bss_conf->basic_rates; wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates); - wl->basic_rate = wl1271_min_rate_get(wl); - ret = wl1271_acx_rate_policies(wl); + wl->basic_rate = wl1271_tx_min_rate_get(wl); + if (sta_rate_set) + wl->rate_set = wl1271_tx_enabled_rates_get(wl, + sta_rate_set); + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) - goto out_sleep; + goto out; /* * with wl1271, 
we don't need to update the @@ -1988,7 +2473,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, */ ret = wl1271_cmd_build_ps_poll(wl, wl->aid); if (ret < 0) - goto out_sleep; + goto out; /* * Get a template for hardware connection maintenance @@ -2002,17 +2487,19 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, /* enable the connection monitoring feature */ ret = wl1271_acx_conn_monit_params(wl, true); if (ret < 0) - goto out_sleep; + goto out; /* If we want to go in PSM but we're not there yet */ if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) && !test_bit(WL1271_FLAG_PSM, &wl->flags)) { + enum wl1271_cmd_ps_mode mode; + mode = STATION_POWER_SAVE_MODE; ret = wl1271_ps_set_mode(wl, mode, wl->basic_rate, true); if (ret < 0) - goto out_sleep; + goto out; } } else { /* use defaults when not associated */ @@ -2029,10 +2516,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, /* revert back to minimum rates for the current band */ wl1271_set_band_rate(wl); - wl->basic_rate = wl1271_min_rate_get(wl); - ret = wl1271_acx_rate_policies(wl); + wl->basic_rate = wl1271_tx_min_rate_get(wl); + ret = wl1271_acx_sta_rate_policies(wl); if (ret < 0) - goto out_sleep; + goto out; /* disable connection monitor features */ ret = wl1271_acx_conn_monit_params(wl, false); @@ -2040,74 +2527,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, /* Disable the keep-alive feature */ ret = wl1271_acx_keep_alive_mode(wl, false); if (ret < 0) - goto out_sleep; + goto out; /* restore the bssid filter and go to dummy bssid */ wl1271_unjoin(wl); wl1271_dummy_join(wl); } - - } - - if (changed & BSS_CHANGED_ERP_SLOT) { - if (bss_conf->use_short_slot) - ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); - else - ret = wl1271_acx_slot(wl, SLOT_TIME_LONG); - if (ret < 0) { - wl1271_warning("Set slot time failed %d", ret); - goto out_sleep; - } } - if (changed & BSS_CHANGED_ERP_PREAMBLE) { - if (bss_conf->use_short_preamble) - wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT); - else - wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG); - } - - if (changed & BSS_CHANGED_ERP_CTS_PROT) { - if (bss_conf->use_cts_prot) - ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE); - else - ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE); - if (ret < 0) { - wl1271_warning("Set ctsprotect failed %d", ret); - goto out_sleep; - } - } - - /* - * Takes care of: New association with HT enable, - * HT information change in beacon. - */ - if (sta && - (changed & BSS_CHANGED_HT) && - (bss_conf->channel_type != NL80211_CHAN_NO_HT)) { - ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true); - if (ret < 0) { - wl1271_warning("Set ht cap true failed %d", ret); - goto out_sleep; - } - ret = wl1271_acx_set_ht_information(wl, - bss_conf->ht_operation_mode); - if (ret < 0) { - wl1271_warning("Set ht information failed %d", ret); - goto out_sleep; - } - } - /* - * Takes care of: New association without HT, - * Disassociation. 
- */ - else if (sta && (changed & BSS_CHANGED_ASSOC)) { - ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false); - if (ret < 0) { - wl1271_warning("Set ht cap false failed %d", ret); - goto out_sleep; - } - } + ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); + if (ret < 0) + goto out; if (changed & BSS_CHANGED_ARP_FILTER) { __be32 addr = bss_conf->arp_addr_list[0]; @@ -2124,29 +2554,57 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, ret = wl1271_cmd_build_arp_rsp(wl, addr); if (ret < 0) { wl1271_warning("build arp rsp failed: %d", ret); - goto out_sleep; + goto out; } ret = wl1271_acx_arp_ip_filter(wl, - (ACX_ARP_FILTER_ARP_FILTERING | - ACX_ARP_FILTER_AUTO_ARP), + ACX_ARP_FILTER_ARP_FILTERING, addr); } else ret = wl1271_acx_arp_ip_filter(wl, 0, addr); if (ret < 0) - goto out_sleep; + goto out; } if (do_join) { ret = wl1271_join(wl, set_assoc); if (ret < 0) { wl1271_warning("cmd join failed %d", ret); - goto out_sleep; + goto out; } } -out_sleep: +out: + return; +} + +static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct wl1271 *wl = hw->priv; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + int ret; + + wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x", + (int)changed); + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WL1271_STATE_OFF)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + if (is_ap) + wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed); + else + wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); + wl1271_ps_elp_sleep(wl); out: @@ -2158,42 +2616,66 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue, { struct wl1271 *wl = hw->priv; u8 ps_scheme; - int ret; + int ret = 0; mutex_lock(&wl->mutex); wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); - if (unlikely(wl->state == WL1271_STATE_OFF)) { - ret = -EAGAIN; - goto out; - } - - ret = wl1271_ps_elp_wakeup(wl, false); - if (ret < 0) - goto out; - - /* the txop is confed in units of 32us by the mac80211, we need us */ - ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), - params->cw_min, params->cw_max, - params->aifs, params->txop << 5); - if (ret < 0) - goto out_sleep; - if (params->uapsd) ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER; else ps_scheme = CONF_PS_SCHEME_LEGACY; - ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), - CONF_CHANNEL_TYPE_EDCF, - wl1271_tx_get_queue(queue), - ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0); - if (ret < 0) - goto out_sleep; + if (wl->state == WL1271_STATE_OFF) { + /* + * If the state is off, the parameters will be recorded and + * configured on init. This happens in AP-mode. 
+ */ + struct conf_tx_ac_category *conf_ac = + &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)]; + struct conf_tx_tid *conf_tid = + &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)]; + + conf_ac->ac = wl1271_tx_get_queue(queue); + conf_ac->cw_min = (u8)params->cw_min; + conf_ac->cw_max = params->cw_max; + conf_ac->aifsn = params->aifs; + conf_ac->tx_op_limit = params->txop << 5; + + conf_tid->queue_id = wl1271_tx_get_queue(queue); + conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF; + conf_tid->tsid = wl1271_tx_get_queue(queue); + conf_tid->ps_scheme = ps_scheme; + conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY; + conf_tid->apsd_conf[0] = 0; + conf_tid->apsd_conf[1] = 0; + } else { + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + /* + * the txop is confed in units of 32us by the mac80211, + * we need us + */ + ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), + params->cw_min, params->cw_max, + params->aifs, params->txop << 5); + if (ret < 0) + goto out_sleep; + + ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), + CONF_CHANNEL_TYPE_EDCF, + wl1271_tx_get_queue(queue), + ps_scheme, CONF_ACK_POLICY_LEGACY, + 0, 0); + if (ret < 0) + goto out_sleep; out_sleep: - wl1271_ps_elp_sleep(wl); + wl1271_ps_elp_sleep(wl); + } out: mutex_unlock(&wl->mutex); @@ -2215,7 +2697,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw) if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -2247,6 +2729,184 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx, return 0; } +static int wl1271_allocate_sta(struct wl1271 *wl, + struct ieee80211_sta *sta, + u8 *hlid) +{ + struct wl1271_station *wl_sta; + int id; + + id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS); + if (id >= AP_MAX_STATIONS) { + wl1271_warning("could not allocate HLID - too much stations"); + return -EBUSY; + } + + wl_sta = (struct wl1271_station *)sta->drv_priv; + __set_bit(id, wl->ap_hlid_map); + wl_sta->hlid = WL1271_AP_STA_HLID_START + id; + *hlid = wl_sta->hlid; + memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN); + return 0; +} + +static void wl1271_free_sta(struct wl1271 *wl, u8 hlid) +{ + int id = hlid - WL1271_AP_STA_HLID_START; + + if (WARN_ON(!test_bit(id, wl->ap_hlid_map))) + return; + + __clear_bit(id, wl->ap_hlid_map); + memset(wl->links[hlid].addr, 0, ETH_ALEN); + wl1271_tx_reset_link_queues(wl, hlid); + __clear_bit(hlid, &wl->ap_ps_map); + __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); +} + +static int wl1271_op_sta_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct wl1271 *wl = hw->priv; + int ret = 0; + u8 hlid; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WL1271_STATE_OFF)) + goto out; + + if (wl->bss_type != BSS_TYPE_AP_BSS) + goto out; + + wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid); + + ret = wl1271_allocate_sta(wl, sta, &hlid); + if (ret < 0) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out_free_sta; + + ret = wl1271_cmd_add_sta(wl, sta, hlid); + if (ret < 0) + goto out_sleep; + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out_free_sta: + if (ret < 0) + wl1271_free_sta(wl, hlid); + +out: + mutex_unlock(&wl->mutex); + return ret; +} + +static int wl1271_op_sta_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct wl1271 *wl = hw->priv; + struct wl1271_station *wl_sta; + int ret = 0, id; + + 
mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WL1271_STATE_OFF)) + goto out; + + if (wl->bss_type != BSS_TYPE_AP_BSS) + goto out; + + wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid); + + wl_sta = (struct wl1271_station *)sta->drv_priv; + id = wl_sta->hlid - WL1271_AP_STA_HLID_START; + if (WARN_ON(!test_bit(id, wl->ap_hlid_map))) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid); + if (ret < 0) + goto out_sleep; + + wl1271_free_sta(wl, wl_sta->hlid); + +out_sleep: + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + return ret; +} + +int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_ampdu_mlme_action action, + struct ieee80211_sta *sta, u16 tid, u16 *ssn, + u8 buf_size) +{ + struct wl1271 *wl = hw->priv; + int ret; + + mutex_lock(&wl->mutex); + + if (unlikely(wl->state == WL1271_STATE_OFF)) { + ret = -EAGAIN; + goto out; + } + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + if (wl->ba_support) { + ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn, + true); + if (!ret) + wl->ba_rx_bitmap |= BIT(tid); + } else { + ret = -ENOTSUPP; + } + break; + + case IEEE80211_AMPDU_RX_STOP: + ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false); + if (!ret) + wl->ba_rx_bitmap &= ~BIT(tid); + break; + + /* + * The BA initiator session management in FW independently. + * Falling break here on purpose for all TX APDU commands. + */ + case IEEE80211_AMPDU_TX_START: + case IEEE80211_AMPDU_TX_STOP: + case IEEE80211_AMPDU_TX_OPERATIONAL: + ret = -EINVAL; + break; + + default: + wl1271_error("Incorrect ampdu action id=%x\n", action); + ret = -EINVAL; + } + + wl1271_ps_elp_sleep(wl); + +out: + mutex_unlock(&wl->mutex); + + return ret; +} + /* can't be const, mac80211 writes to this */ static struct ieee80211_rate wl1271_rates[] = { { .bitrate = 10, @@ -2305,6 +2965,7 @@ static struct ieee80211_channel wl1271_channels[] = { { .hw_value = 11, .center_freq = 2462, .max_power = 25 }, { .hw_value = 12, .center_freq = 2467, .max_power = 25 }, { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, + { .hw_value = 14, .center_freq = 2484, .max_power = 25 }, }; /* mapping to indexes for wl1271_rates */ @@ -2493,6 +3154,9 @@ static const struct ieee80211_ops wl1271_ops = { .conf_tx = wl1271_op_conf_tx, .get_tsf = wl1271_op_get_tsf, .get_survey = wl1271_op_get_survey, + .sta_add = wl1271_op_sta_add, + .sta_remove = wl1271_op_sta_remove, + .ampdu_action = wl1271_op_ampdu_action, CFG80211_TESTMODE_CMD(wl1271_tm_cmd) }; @@ -2562,7 +3226,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev, if (wl->state == WL1271_STATE_OFF) goto out; - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out; @@ -2607,6 +3271,18 @@ int wl1271_register_hw(struct wl1271 *wl) if (wl->mac80211_registered) return 0; + ret = wl1271_fetch_nvs(wl); + if (ret == 0) { + u8 *nvs_ptr = (u8 *)wl->nvs->nvs; + + wl->mac_addr[0] = nvs_ptr[11]; + wl->mac_addr[1] = nvs_ptr[10]; + wl->mac_addr[2] = nvs_ptr[6]; + wl->mac_addr[3] = nvs_ptr[5]; + wl->mac_addr[4] = nvs_ptr[4]; + wl->mac_addr[5] = nvs_ptr[3]; + } + SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr); ret = ieee80211_register_hw(wl->hw); @@ -2629,6 +3305,9 @@ EXPORT_SYMBOL_GPL(wl1271_register_hw); void wl1271_unregister_hw(struct wl1271 *wl) { + if (wl->state == WL1271_STATE_PLT) + __wl1271_plt_stop(wl); + 
unregister_netdevice_notifier(&wl1271_dev_notifier); ieee80211_unregister_hw(wl->hw); wl->mac80211_registered = false; @@ -2661,13 +3340,15 @@ int wl1271_init_ieee80211(struct wl1271 *wl) IEEE80211_HW_SUPPORTS_UAPSD | IEEE80211_HW_HAS_RATE_CONTROL | IEEE80211_HW_CONNECTION_MONITOR | - IEEE80211_HW_SUPPORTS_CQM_RSSI; + IEEE80211_HW_SUPPORTS_CQM_RSSI | + IEEE80211_HW_REPORTS_TX_ACK_STATUS | + IEEE80211_HW_AP_LINK_PS; wl->hw->wiphy->cipher_suites = cipher_suites; wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC); + BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); wl->hw->wiphy->max_scan_ssids = 1; /* * Maximum length of elements in scanning probe request templates @@ -2676,8 +3357,20 @@ int wl1271_init_ieee80211(struct wl1271 *wl) */ wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - sizeof(struct ieee80211_header); - wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; - wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz; + + /* + * We keep local copies of the band structs because we need to + * modify them on a per-device basis. + */ + memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, + sizeof(wl1271_band_2ghz)); + memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, + sizeof(wl1271_band_5ghz)); + + wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &wl->bands[IEEE80211_BAND_2GHZ]; + wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &wl->bands[IEEE80211_BAND_5GHZ]; wl->hw->queues = 4; wl->hw->max_rates = 1; @@ -2686,6 +3379,10 @@ int wl1271_init_ieee80211(struct wl1271 *wl) SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl)); + wl->hw->sta_data_size = sizeof(struct wl1271_station); + + wl->hw->max_rx_aggregation_subframes = 8; + return 0; } EXPORT_SYMBOL_GPL(wl1271_init_ieee80211); @@ -2697,7 +3394,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void) struct ieee80211_hw *hw; struct platform_device *plat_dev = NULL; struct wl1271 *wl; - int i, ret; + int i, j, ret; unsigned int order; hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); @@ -2725,9 +3422,16 @@ struct ieee80211_hw *wl1271_alloc_hw(void) for (i = 0; i < NUM_TX_QUEUES; i++) skb_queue_head_init(&wl->tx_queue[i]); + for (i = 0; i < NUM_TX_QUEUES; i++) + for (j = 0; j < AP_MAX_LINKS; j++) + skb_queue_head_init(&wl->links[j].tx_queue[i]); + + skb_queue_head_init(&wl->deferred_rx_queue); + skb_queue_head_init(&wl->deferred_tx_queue); + INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); - INIT_WORK(&wl->irq_work, wl1271_irq_work); + INIT_WORK(&wl->netstack_work, wl1271_netstack_work); INIT_WORK(&wl->tx_work, wl1271_tx_work); INIT_WORK(&wl->recovery_work, wl1271_recovery_work); INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); @@ -2735,19 +3439,25 @@ struct ieee80211_hw *wl1271_alloc_hw(void) wl->beacon_int = WL1271_DEFAULT_BEACON_INT; wl->default_key = 0; wl->rx_counter = 0; - wl->rx_config = WL1271_DEFAULT_RX_CONFIG; - wl->rx_filter = WL1271_DEFAULT_RX_FILTER; + wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG; + wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER; wl->psm_entry_retry = 0; wl->power_level = WL1271_DEFAULT_POWER_LEVEL; wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; wl->basic_rate = CONF_TX_RATE_MASK_BASIC; wl->rate_set = CONF_TX_RATE_MASK_BASIC; - wl->sta_rate_set = 0; wl->band = IEEE80211_BAND_2GHZ; wl->vif = NULL; wl->flags = 0; wl->sg_enabled = true; wl->hw_pg_ver = -1; + wl->bss_type = MAX_BSS_TYPE; + wl->set_bss_type = 
MAX_BSS_TYPE; + wl->fw_bss_type = MAX_BSS_TYPE; + wl->last_tx_hlid = 0; + wl->ap_ps_map = 0; + wl->ap_fw_ps_map = 0; + wl->quirks = 0; memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); for (i = 0; i < ACX_TX_DESCRIPTORS; i++) @@ -2837,11 +3547,11 @@ int wl1271_free_hw(struct wl1271 *wl) } EXPORT_SYMBOL_GPL(wl1271_free_hw); -u32 wl12xx_debug_level; +u32 wl12xx_debug_level = DEBUG_NONE; EXPORT_SYMBOL_GPL(wl12xx_debug_level); -module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE); +module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); +MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c index 60a3738eadb0..971f13e792da 100644 --- a/drivers/net/wireless/wl12xx/ps.c +++ b/drivers/net/wireless/wl12xx/ps.c @@ -24,6 +24,7 @@ #include "reg.h" #include "ps.h" #include "io.h" +#include "tx.h" #define WL1271_WAKEUP_TIMEOUT 500 @@ -68,7 +69,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl) } } -int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake) +int wl1271_ps_elp_wakeup(struct wl1271 *wl) { DECLARE_COMPLETION_ONSTACK(compl); unsigned long flags; @@ -86,7 +87,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake) * the completion variable in one entity. */ spin_lock_irqsave(&wl->wl_lock, flags); - if (work_pending(&wl->irq_work) || chip_awake) + if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) pending = true; else wl->elp_compl = &compl; @@ -139,8 +140,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, return ret; } - ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE, - rates, send); + ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); if (ret < 0) return ret; @@ -149,7 +149,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, case STATION_ACTIVE_MODE: default: wl1271_debug(DEBUG_PSM, "leaving psm"); - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) return ret; @@ -163,8 +163,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, if (ret < 0) return ret; - ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE, - rates, send); + ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE); if (ret < 0) return ret; @@ -175,4 +174,81 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, return ret; } +static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid) +{ + int i, filtered = 0; + struct sk_buff *skb; + struct ieee80211_tx_info *info; + unsigned long flags; + + /* filter all frames currently the low level queus for this hlid */ + for (i = 0; i < NUM_TX_QUEUES; i++) { + while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) { + info = IEEE80211_SKB_CB(skb); + info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + info->status.rates[0].idx = -1; + ieee80211_tx_status(wl->hw, skb); + filtered++; + } + } + + spin_lock_irqsave(&wl->wl_lock, flags); + wl->tx_queue_count -= filtered; + spin_unlock_irqrestore(&wl->wl_lock, flags); + + wl1271_handle_tx_low_watermark(wl); +} + +void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues) +{ + struct ieee80211_sta *sta; + + if (test_bit(hlid, &wl->ap_ps_map)) + return; + + wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d " + "clean_queues %d", hlid, 
wl->links[hlid].allocated_blks, + clean_queues); + + rcu_read_lock(); + sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr); + if (!sta) { + wl1271_error("could not find sta %pM for starting ps", + wl->links[hlid].addr); + rcu_read_unlock(); + return; + } + ieee80211_sta_ps_transition_ni(sta, true); + rcu_read_unlock(); + + /* do we want to filter all frames from this link's queues? */ + if (clean_queues) + wl1271_ps_filter_frames(wl, hlid); + + __set_bit(hlid, &wl->ap_ps_map); +} + +void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid) +{ + struct ieee80211_sta *sta; + + if (!test_bit(hlid, &wl->ap_ps_map)) + return; + + wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid); + + __clear_bit(hlid, &wl->ap_ps_map); + + rcu_read_lock(); + sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr); + if (!sta) { + wl1271_error("could not find sta %pM for ending ps", + wl->links[hlid].addr); + goto end; + } + + ieee80211_sta_ps_transition_ni(sta, false); +end: + rcu_read_unlock(); +} diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h index 8415060f08e5..c41bd0a711bc 100644 --- a/drivers/net/wireless/wl12xx/ps.h +++ b/drivers/net/wireless/wl12xx/ps.h @@ -30,7 +30,9 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, u32 rates, bool send); void wl1271_ps_elp_sleep(struct wl1271 *wl); -int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); +int wl1271_ps_elp_wakeup(struct wl1271 *wl); void wl1271_elp_work(struct work_struct *work); +void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues); +void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid); #endif /* __WL1271_PS_H__ */ diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c index 682304c30b81..919b59f00301 100644 --- a/drivers/net/wireless/wl12xx/rx.c +++ b/drivers/net/wireless/wl12xx/rx.c @@ -29,14 +29,14 @@ #include "rx.h" #include "io.h" -static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, +static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status, u32 drv_rx_counter) { return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & RX_MEM_BLOCK_MASK; } -static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status, +static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status, u32 drv_rx_counter) { return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & @@ -76,7 +76,7 @@ static void wl1271_rx_status(struct wl1271 *wl, */ wl->noise = desc->rssi - (desc->snr >> 1); - status->freq = ieee80211_channel_to_frequency(desc->channel); + status->freq = ieee80211_channel_to_frequency(desc->channel, desc_band); if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; @@ -92,7 +92,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length) { struct wl1271_rx_descriptor *desc; struct sk_buff *skb; - u16 *fc; + struct ieee80211_hdr *hdr; u8 *buf; u8 beacon = 0; @@ -118,8 +118,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length) /* now we pull the descriptor out of the buffer */ skb_pull(skb, sizeof(*desc)); - fc = (u16 *)skb->data; - if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON) + hdr = (struct ieee80211_hdr *)skb->data; + if (ieee80211_is_beacon(hdr->frame_control)) beacon = 1; wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); @@ -129,12 +129,13 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length) skb_trim(skb, skb->len - desc->pad_len); - 
ieee80211_rx_ni(wl->hw, skb);
+ skb_queue_tail(&wl->deferred_rx_queue, skb);
+ ieee80211_queue_work(wl->hw, &wl->netstack_work);
return 0;
}
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
u32 buf_size;
@@ -198,6 +199,22 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
pkt_offset += pkt_length;
}
}
- wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
- cpu_to_le32(wl->rx_counter));
+
+ /*
+ * Write the driver's packet counter to the FW. This is only required
+ * for older hardware revisions
+ */
+ if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
+}
+
+void wl1271_set_default_filters(struct wl1271 *wl)
+{
+ if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
+ } else {
+ wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
+ wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
+ }
}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index 3abb26fe0364..75fabf836491 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -30,10 +30,6 @@
#define WL1271_RX_MAX_RSSI -30
#define WL1271_RX_MIN_RSSI -95
-#define WL1271_RX_ALIGN_TO 4
-#define WL1271_RX_ALIGN(len) (((len) + WL1271_RX_ALIGN_TO - 1) & \
- ~(WL1271_RX_ALIGN_TO - 1))
-
#define SHORT_PREAMBLE_BIT BIT(0)
#define OFDM_RATE_BIT BIT(6)
#define PBCC_RATE_BIT BIT(7)
@@ -86,8 +82,9 @@
/*
* RX Descriptor status
*
- * Bits 0-2 - status
- * Bits 3-7 - reserved
+ * Bits 0-2 - error code
+ * Bits 3-5 - process_id tag (AP mode FW)
+ * Bits 6-7 - reserved
*/
#define WL1271_RX_DESC_STATUS_MASK 0x07
@@ -110,12 +107,16 @@ struct wl1271_rx_descriptor {
u8 snr;
__le32 timestamp;
u8 packet_class;
- u8 process_id;
+ union {
+ u8 process_id; /* STA FW */
+ u8 hlid; /* AP FW */
+ } __packed;
u8 pad_len;
u8 reserved;
} __packed;
-void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
+void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+void wl1271_set_default_filters(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index 6f897b9d90ca..420653a2859c 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -27,6 +27,7 @@
#include "cmd.h"
#include "scan.h"
#include "acx.h"
+#include "ps.h"
void wl1271_scan_complete_work(struct work_struct *work)
{
@@ -40,10 +41,11 @@ void wl1271_scan_complete_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->scan.state == WL1271_SCAN_STATE_IDLE) {
- mutex_unlock(&wl->mutex);
- return;
- }
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
+ goto out;
wl->scan.state = WL1271_SCAN_STATE_IDLE;
kfree(wl->scan.scanned_ch);
@@ -52,13 +54,19 @@
ieee80211_scan_completed(wl->hw, false);
/* restore hardware connection monitoring template */
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (wl1271_ps_elp_wakeup(wl) == 0) {
+ wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ wl1271_ps_elp_sleep(wl);
+ }
+ }
if (wl->scan.failed) {
wl1271_info("Scan 
completed due to error."); ieee80211_queue_work(wl->hw, &wl->recovery_work); } + +out: mutex_unlock(&wl->mutex); } diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c index 93cbb8d5aba9..5b9dbeafec06 100644 --- a/drivers/net/wireless/wl12xx/sdio.c +++ b/drivers/net/wireless/wl12xx/sdio.c @@ -28,6 +28,7 @@ #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/card.h> +#include <linux/mmc/host.h> #include <linux/gpio.h> #include <linux/wl12xx.h> #include <linux/pm_runtime.h> @@ -60,7 +61,7 @@ static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl) return &(wl_to_func(wl)->dev); } -static irqreturn_t wl1271_irq(int irq, void *cookie) +static irqreturn_t wl1271_hardirq(int irq, void *cookie) { struct wl1271 *wl = cookie; unsigned long flags; @@ -69,17 +70,14 @@ static irqreturn_t wl1271_irq(int irq, void *cookie) /* complete the ELP completion */ spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); if (wl->elp_compl) { complete(wl->elp_compl); wl->elp_compl = NULL; } - - if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) - ieee80211_queue_work(wl->hw, &wl->irq_work); - set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags); spin_unlock_irqrestore(&wl->wl_lock, flags); - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } static void wl1271_sdio_disable_interrupts(struct wl1271 *wl) @@ -106,8 +104,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf, int ret; struct sdio_func *func = wl_to_func(wl); - sdio_claim_host(func); - if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", @@ -123,8 +119,6 @@ static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf, wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); } - sdio_release_host(func); - if (ret) wl1271_error("sdio read failed (%d)", ret); } @@ -135,8 +129,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, int ret; struct sdio_func *func = wl_to_func(wl); - sdio_claim_host(func); - if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x", @@ -152,8 +144,6 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf, ret = sdio_memcpy_toio(func, addr, buf, len); } - sdio_release_host(func); - if (ret) wl1271_error("sdio write failed (%d)", ret); } @@ -163,14 +153,18 @@ static int wl1271_sdio_power_on(struct wl1271 *wl) struct sdio_func *func = wl_to_func(wl); int ret; - /* Power up the card */ + /* Make sure the card will not be powered off by runtime PM */ ret = pm_runtime_get_sync(&func->dev); if (ret < 0) goto out; + /* Runtime PM might be disabled, so power up the card manually */ + ret = mmc_power_restore_host(func->card->host); + if (ret < 0) + goto out; + sdio_claim_host(func); sdio_enable_func(func); - sdio_release_host(func); out: return ret; @@ -179,12 +173,17 @@ out: static int wl1271_sdio_power_off(struct wl1271 *wl) { struct sdio_func *func = wl_to_func(wl); + int ret; - sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); - /* Power down the card */ + /* Runtime PM might be disabled, so power off the card manually */ + ret = mmc_power_save_host(func->card->host); + if (ret < 0) + return ret; + + /* Let runtime PM know the card is powered off */ return pm_runtime_put_sync(&func->dev); } @@ -241,14 +240,14 @@ static 
int __devinit wl1271_probe(struct sdio_func *func, wl->irq = wlan_data->irq; wl->ref_clock = wlan_data->board_ref_clock; - ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); + ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + DRIVER_NAME, wl); if (ret < 0) { wl1271_error("request_irq() failed: %d", ret); goto out_free; } - set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); - disable_irq(wl->irq); ret = wl1271_init_ieee80211(wl); @@ -271,7 +270,6 @@ static int __devinit wl1271_probe(struct sdio_func *func, out_irq: free_irq(wl->irq, wl); - out_free: wl1271_free_hw(wl); @@ -345,3 +343,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); MODULE_FIRMWARE(WL1271_FW_NAME); +MODULE_FIRMWARE(WL1271_AP_FW_NAME); diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c index 7145ea543783..18cf01719ae0 100644 --- a/drivers/net/wireless/wl12xx/spi.c +++ b/drivers/net/wireless/wl12xx/spi.c @@ -110,6 +110,7 @@ static void wl1271_spi_reset(struct wl1271 *wl) spi_message_add_tail(&t, &m); spi_sync(wl_to_spi(wl), &m); + wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); kfree(cmd); } @@ -319,28 +320,23 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, spi_sync(wl_to_spi(wl), &m); } -static irqreturn_t wl1271_irq(int irq, void *cookie) +static irqreturn_t wl1271_hardirq(int irq, void *cookie) { - struct wl1271 *wl; + struct wl1271 *wl = cookie; unsigned long flags; wl1271_debug(DEBUG_IRQ, "IRQ"); - wl = cookie; - /* complete the ELP completion */ spin_lock_irqsave(&wl->wl_lock, flags); + set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); if (wl->elp_compl) { complete(wl->elp_compl); wl->elp_compl = NULL; } - - if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) - ieee80211_queue_work(wl->hw, &wl->irq_work); - set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags); spin_unlock_irqrestore(&wl->wl_lock, flags); - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } static int wl1271_spi_set_power(struct wl1271 *wl, bool enable) @@ -412,14 +408,14 @@ static int __devinit wl1271_probe(struct spi_device *spi) goto out_free; } - ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); + ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + DRIVER_NAME, wl); if (ret < 0) { wl1271_error("request_irq() failed: %d", ret); goto out_free; } - set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); - disable_irq(wl->irq); ret = wl1271_init_ieee80211(wl); @@ -494,4 +490,5 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); MODULE_FIRMWARE(WL1271_FW_NAME); +MODULE_FIRMWARE(WL1271_AP_FW_NAME); MODULE_ALIAS("spi:wl1271"); diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c index b44c75cd8c1e..5e9ef7d53e7e 100644 --- a/drivers/net/wireless/wl12xx/tx.c +++ b/drivers/net/wireless/wl12xx/tx.c @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/etherdevice.h> #include "wl12xx.h" #include "io.h" @@ -30,6 +31,23 @@ #include "ps.h" #include "tx.h" +static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id) +{ + int ret; + bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); + + if (is_ap) + ret = wl1271_cmd_set_ap_default_wep_key(wl, id); + else + ret = wl1271_cmd_set_sta_default_wep_key(wl, id); + + if (ret < 0) + return ret; + + 
wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id); + return 0; +} + static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb) { int id; @@ -52,8 +70,65 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id) } } +static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr; + + /* + * add the station to the known list before transmitting the + * authentication response. this way it won't get de-authed by FW + * when transmitting too soon. + */ + hdr = (struct ieee80211_hdr *)(skb->data + + sizeof(struct wl1271_tx_hw_descr)); + if (ieee80211_is_auth(hdr->frame_control)) + wl1271_acx_set_inconnection_sta(wl, hdr->addr1); +} + +static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid) +{ + bool fw_ps; + u8 tx_blks; + + /* only regulate station links */ + if (hlid < WL1271_AP_STA_HLID_START) + return; + + fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); + tx_blks = wl->links[hlid].allocated_blks; + + /* + * if in FW PS and there is enough data in FW we can put the link + * into high-level PS and clean out its TX queues. + */ + if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS) + wl1271_ps_link_start(wl, hlid, true); +} + +u8 wl1271_tx_get_hlid(struct sk_buff *skb) +{ + struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); + + if (control->control.sta) { + struct wl1271_station *wl_sta; + + wl_sta = (struct wl1271_station *) + control->control.sta->drv_priv; + return wl_sta->hlid; + } else { + struct ieee80211_hdr *hdr; + + hdr = (struct ieee80211_hdr *)skb->data; + if (ieee80211_is_mgmt(hdr->frame_control)) + return WL1271_AP_GLOBAL_HLID; + else + return WL1271_AP_BROADCAST_HLID; + } +} + static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, - u32 buf_offset) + u32 buf_offset, u8 hlid) { struct wl1271_tx_hw_descr *desc; u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; @@ -82,6 +157,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, wl->tx_blocks_available -= total_blocks; + if (wl->bss_type == BSS_TYPE_AP_BSS) + wl->links[hlid].allocated_blks += total_blocks; + ret = 0; wl1271_debug(DEBUG_TX, @@ -95,11 +173,12 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, } static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, - u32 extra, struct ieee80211_tx_info *control) + u32 extra, struct ieee80211_tx_info *control, + u8 hlid) { struct timespec ts; struct wl1271_tx_hw_descr *desc; - int pad, ac; + int pad, ac, rate_idx; s64 hosttime; u16 tx_attr; @@ -117,7 +196,11 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, getnstimeofday(&ts); hosttime = (timespec_to_ns(&ts) >> 10); desc->start_time = cpu_to_le32(hosttime - wl->time_offset); - desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); + + if (wl->bss_type != BSS_TYPE_AP_BSS) + desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); + else + desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU); /* configure the tx attributes */ tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; @@ -125,25 +208,49 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, /* queue (we use same identifiers for tid's and ac's */ ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); desc->tid = ac; - desc->aid = TX_HW_DEFAULT_AID; + + if (wl->bss_type != BSS_TYPE_AP_BSS) { + desc->aid = hlid; + + /* if the packets are destined for AP (have a STA entry) + send 
them with AP rate policies, otherwise use default + basic rates */ + if (control->control.sta) + rate_idx = ACX_TX_AP_FULL_RATE; + else + rate_idx = ACX_TX_BASIC_RATE; + } else { + desc->hlid = hlid; + switch (hlid) { + case WL1271_AP_GLOBAL_HLID: + rate_idx = ACX_TX_AP_MODE_MGMT_RATE; + break; + case WL1271_AP_BROADCAST_HLID: + rate_idx = ACX_TX_AP_MODE_BCST_RATE; + break; + default: + rate_idx = ac; + break; + } + } + + tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; desc->reserved = 0; /* align the length (and store in terms of words) */ - pad = WL1271_TX_ALIGN(skb->len); + pad = ALIGN(skb->len, WL1271_TX_ALIGN_TO); desc->length = cpu_to_le16(pad >> 2); /* calculate number of padding bytes */ pad = pad - skb->len; tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; - /* if the packets are destined for AP (have a STA entry) send them - with AP rate policies, otherwise use default basic rates */ - if (control->control.sta) - tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY; - desc->tx_attr = cpu_to_le16(tx_attr); - wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); + wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d " + "tx_attr: 0x%x len: %d life: %d mem: %d", pad, desc->hlid, + le16_to_cpu(desc->tx_attr), le16_to_cpu(desc->length), + le16_to_cpu(desc->life_time), desc->total_mem_blocks); } /* caller must hold wl->mutex */ @@ -153,8 +260,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb, struct ieee80211_tx_info *info; u32 extra = 0; int ret = 0; - u8 idx; u32 total_len; + u8 hlid; if (!skb) return -EINVAL; @@ -166,29 +273,43 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb, extra = WL1271_TKIP_IV_SPACE; if (info->control.hw_key) { - idx = info->control.hw_key->hw_key_idx; + bool is_wep; + u8 idx = info->control.hw_key->hw_key_idx; + u32 cipher = info->control.hw_key->cipher; - /* FIXME: do we have to do this if we're not using WEP? */ - if (unlikely(wl->default_key != idx)) { - ret = wl1271_cmd_set_default_wep_key(wl, idx); + is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) || + (cipher == WLAN_CIPHER_SUITE_WEP104); + + if (unlikely(is_wep && wl->default_key != idx)) { + ret = wl1271_set_default_wep_key(wl, idx); if (ret < 0) return ret; wl->default_key = idx; } } - ret = wl1271_tx_allocate(wl, skb, extra, buf_offset); + if (wl->bss_type == BSS_TYPE_AP_BSS) + hlid = wl1271_tx_get_hlid(skb); + else + hlid = TX_HW_DEFAULT_AID; + + ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid); if (ret < 0) return ret; - wl1271_tx_fill_hdr(wl, skb, extra, info); + if (wl->bss_type == BSS_TYPE_AP_BSS) { + wl1271_tx_ap_update_inconnection_sta(wl, skb); + wl1271_tx_regulate_link(wl, hlid); + } + + wl1271_tx_fill_hdr(wl, skb, extra, info, hlid); /* * The length of each packet is stored in terms of words. Thus, we must * pad the skb data to make sure its length is aligned. 
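
The alignment change above drops the driver-private WL1271_TX_ALIGN() macro in favour of the kernel's generic ALIGN(): the frame is still padded to a 4-byte boundary, the padded length is stored in the descriptor in 32-bit words, and the pad byte count is folded into tx_attr. A minimal userspace sketch of that arithmetic, with illustrative names (ALIGN_UP stands in for the kernel macro):

#include <stdio.h>

#define TX_ALIGN_TO 4
/* Same rounding as the kernel's ALIGN() for power-of-two alignments. */
#define ALIGN_UP(len, a) (((len) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int skb_len = 1419;                    /* example frame length */
	unsigned int padded = ALIGN_UP(skb_len, TX_ALIGN_TO);
	unsigned int desc_len_words = padded >> 2;      /* descriptor length field */
	unsigned int pad_bytes = padded - skb_len;      /* folded into tx_attr */

	printf("len=%u padded=%u words=%u pad=%u\n",
	       skb_len, padded, desc_len_words, pad_bytes);
	return 0;
}
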
* The number of padding bytes is computed and set in wl1271_tx_fill_hdr */ - total_len = WL1271_TX_ALIGN(skb->len); + total_len = ALIGN(skb->len, WL1271_TX_ALIGN_TO); memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); @@ -222,7 +343,7 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set) return enabled_rates; } -static void handle_tx_low_watermark(struct wl1271 *wl) +void wl1271_handle_tx_low_watermark(struct wl1271 *wl) { unsigned long flags; @@ -236,7 +357,7 @@ static void handle_tx_low_watermark(struct wl1271 *wl) } } -static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) +static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl) { struct sk_buff *skb = NULL; unsigned long flags; @@ -262,12 +383,69 @@ out: return skb; } +static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl) +{ + struct sk_buff *skb = NULL; + unsigned long flags; + int i, h, start_hlid; + + /* start from the link after the last one */ + start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS; + + /* dequeue according to AC, round robin on each link */ + for (i = 0; i < AP_MAX_LINKS; i++) { + h = (start_hlid + i) % AP_MAX_LINKS; + + skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VO]); + if (skb) + goto out; + skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_VI]); + if (skb) + goto out; + skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BE]); + if (skb) + goto out; + skb = skb_dequeue(&wl->links[h].tx_queue[CONF_TX_AC_BK]); + if (skb) + goto out; + } + +out: + if (skb) { + wl->last_tx_hlid = h; + spin_lock_irqsave(&wl->wl_lock, flags); + wl->tx_queue_count--; + spin_unlock_irqrestore(&wl->wl_lock, flags); + } else { + wl->last_tx_hlid = 0; + } + + return skb; +} + +static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl) +{ + if (wl->bss_type == BSS_TYPE_AP_BSS) + return wl1271_ap_skb_dequeue(wl); + + return wl1271_sta_skb_dequeue(wl); +} + static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb) { unsigned long flags; int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); - skb_queue_head(&wl->tx_queue[q], skb); + if (wl->bss_type == BSS_TYPE_AP_BSS) { + u8 hlid = wl1271_tx_get_hlid(skb); + skb_queue_head(&wl->links[hlid].tx_queue[q], skb); + + /* make sure we dequeue the same packet next time */ + wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS; + } else { + skb_queue_head(&wl->tx_queue[q], skb); + } + spin_lock_irqsave(&wl->wl_lock, flags); wl->tx_queue_count++; spin_unlock_irqrestore(&wl->wl_lock, flags); @@ -277,38 +455,16 @@ void wl1271_tx_work_locked(struct wl1271 *wl) { struct sk_buff *skb; bool woken_up = false; - u32 sta_rates = 0; u32 buf_offset = 0; bool sent_packets = false; int ret; - /* check if the rates supported by the AP have changed */ - if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED, - &wl->flags))) { - unsigned long flags; - - spin_lock_irqsave(&wl->wl_lock, flags); - sta_rates = wl->sta_rate_set; - spin_unlock_irqrestore(&wl->wl_lock, flags); - } - if (unlikely(wl->state == WL1271_STATE_OFF)) goto out; - /* if rates have changed, re-configure the rate policy */ - if (unlikely(sta_rates)) { - ret = wl1271_ps_elp_wakeup(wl, false); - if (ret < 0) - goto out; - woken_up = true; - - wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates); - wl1271_acx_rate_policies(wl); - } - while ((skb = wl1271_skb_dequeue(wl))) { if (!woken_up) { - ret = wl1271_ps_elp_wakeup(wl, false); + ret = wl1271_ps_elp_wakeup(wl); if (ret < 0) goto out_ack; 
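
wl1271_ap_skb_dequeue() above introduces per-link TX queues with a two-level policy: links are served round-robin starting after the link that transmitted last, and within a link the access categories are tried in strict priority order (VO, VI, BE, BK). A self-contained userspace model of that policy, with queue depths reduced to plain counters and illustrative names:

#include <stdio.h>

#define MAX_LINKS 7   /* global + broadcast + 5 stations in this patch */
#define NUM_ACS   4   /* VO, VI, BE, BK */

static int queue_len[MAX_LINKS][NUM_ACS];
static int last_tx_link;

/* Returns the link served, or -1 if every queue is empty. */
static int dequeue_one(int *ac_out)
{
	int start = (last_tx_link + 1) % MAX_LINKS;
	int i, ac;

	for (i = 0; i < MAX_LINKS; i++) {
		int link = (start + i) % MAX_LINKS;

		for (ac = 0; ac < NUM_ACS; ac++) {   /* highest priority first */
			if (queue_len[link][ac] > 0) {
				queue_len[link][ac]--;
				last_tx_link = link;
				*ac_out = ac;
				return link;
			}
		}
	}
	last_tx_link = 0;
	return -1;
}

int main(void)
{
	int link, ac;

	queue_len[2][2] = 2;   /* station link 2, best effort */
	queue_len[4][0] = 1;   /* station link 4, voice */

	while ((link = dequeue_one(&ac)) >= 0)
		printf("tx from link %d, AC %d\n", link, ac);
	return 0;
}
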
woken_up = true; @@ -350,9 +506,15 @@ out_ack: sent_packets = true; } if (sent_packets) { - /* interrupt the firmware with the new packets */ - wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); - handle_tx_low_watermark(wl); + /* + * Interrupt the firmware with the new packets. This is only + * required for older hardware revisions + */ + if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) + wl1271_write32(wl, WL1271_HOST_WR_ACCESS, + wl->tx_packets_count); + + wl1271_handle_tx_low_watermark(wl); } out: @@ -427,7 +589,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl, result->rate_class_index, result->status); /* return the packet to the stack */ - ieee80211_tx_status(wl->hw, skb); + skb_queue_tail(&wl->deferred_tx_queue, skb); + ieee80211_queue_work(wl->hw, &wl->netstack_work); wl1271_free_tx_id(wl, result->id); } @@ -469,34 +632,92 @@ void wl1271_tx_complete(struct wl1271 *wl) } } +void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) +{ + struct sk_buff *skb; + int i, total = 0; + unsigned long flags; + struct ieee80211_tx_info *info; + + for (i = 0; i < NUM_TX_QUEUES; i++) { + while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) { + wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb); + info = IEEE80211_SKB_CB(skb); + info->status.rates[0].idx = -1; + info->status.rates[0].count = 0; + ieee80211_tx_status(wl->hw, skb); + total++; + } + } + + spin_lock_irqsave(&wl->wl_lock, flags); + wl->tx_queue_count -= total; + spin_unlock_irqrestore(&wl->wl_lock, flags); + + wl1271_handle_tx_low_watermark(wl); +} + /* caller must hold wl->mutex */ void wl1271_tx_reset(struct wl1271 *wl) { int i; struct sk_buff *skb; + struct ieee80211_tx_info *info; /* TX failure */ - for (i = 0; i < NUM_TX_QUEUES; i++) { - while ((skb = skb_dequeue(&wl->tx_queue[i]))) { - wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); - ieee80211_tx_status(wl->hw, skb); + if (wl->bss_type == BSS_TYPE_AP_BSS) { + for (i = 0; i < AP_MAX_LINKS; i++) { + wl1271_tx_reset_link_queues(wl, i); + wl->links[i].allocated_blks = 0; + wl->links[i].prev_freed_blks = 0; + } + + wl->last_tx_hlid = 0; + } else { + for (i = 0; i < NUM_TX_QUEUES; i++) { + while ((skb = skb_dequeue(&wl->tx_queue[i]))) { + wl1271_debug(DEBUG_TX, "freeing skb 0x%p", + skb); + info = IEEE80211_SKB_CB(skb); + info->status.rates[0].idx = -1; + info->status.rates[0].count = 0; + ieee80211_tx_status(wl->hw, skb); + } } } + wl->tx_queue_count = 0; /* * Make sure the driver is at a consistent state, in case this * function is called from a context other than interface removal. 
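
Both the RX path earlier in this patch and the TX completion above stop calling into mac80211 directly from the I/O path: finished frames are parked on deferred_rx_queue / deferred_tx_queue and netstack_work is scheduled to hand them to the stack later. A rough userspace sketch of that producer/worker split, using a plain ring buffer in place of the kernel's sk_buff queues and workqueue (all names illustrative):

#include <stdio.h>

#define QUEUE_DEPTH 64   /* mirrors the WL1271_DEFERRED_QUEUE_LIMIT added in wl12xx.h */

struct deferred_queue {
	int frames[QUEUE_DEPTH];
	unsigned int head, tail;
};

static int queue_frame(struct deferred_queue *q, int frame)
{
	if (q->tail - q->head == QUEUE_DEPTH)
		return -1;                        /* queue full, caller must throttle */
	q->frames[q->tail++ % QUEUE_DEPTH] = frame;
	return 0;
}

/* Models the netstack_work handler: drain everything queued so far. */
static void netstack_work(struct deferred_queue *q)
{
	while (q->head != q->tail) {
		int frame = q->frames[q->head++ % QUEUE_DEPTH];
		printf("delivering frame %d to the stack\n", frame);
	}
}

int main(void)
{
	struct deferred_queue rxq = { .head = 0, .tail = 0 };

	/* "interrupt context": only enqueue and request the work to run */
	queue_frame(&rxq, 1);
	queue_frame(&rxq, 2);

	/* "process context": the scheduled work delivers the frames */
	netstack_work(&rxq);
	return 0;
}
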
*/ - handle_tx_low_watermark(wl); + wl1271_handle_tx_low_watermark(wl); - for (i = 0; i < ACX_TX_DESCRIPTORS; i++) - if (wl->tx_frames[i] != NULL) { - skb = wl->tx_frames[i]; - wl1271_free_tx_id(wl, i); - wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); - ieee80211_tx_status(wl->hw, skb); + for (i = 0; i < ACX_TX_DESCRIPTORS; i++) { + if (wl->tx_frames[i] == NULL) + continue; + + skb = wl->tx_frames[i]; + wl1271_free_tx_id(wl, i); + wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); + + /* Remove private headers before passing the skb to mac80211 */ + info = IEEE80211_SKB_CB(skb); + skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); + if (info->control.hw_key && + info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { + int hdrlen = ieee80211_get_hdrlen_from_skb(skb); + memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, + hdrlen); + skb_pull(skb, WL1271_TKIP_IV_SPACE); } + + info->status.rates[0].idx = -1; + info->status.rates[0].count = 0; + + ieee80211_tx_status(wl->hw, skb); + } } #define WL1271_TX_FLUSH_TIMEOUT 500000 @@ -509,8 +730,8 @@ void wl1271_tx_flush(struct wl1271 *wl) while (!time_after(jiffies, timeout)) { mutex_lock(&wl->mutex); - wl1271_debug(DEBUG_TX, "flushing tx buffer: %d", - wl->tx_frames_cnt); + wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d", + wl->tx_frames_cnt, wl->tx_queue_count); if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) { mutex_unlock(&wl->mutex); return; @@ -521,3 +742,21 @@ void wl1271_tx_flush(struct wl1271 *wl) wl1271_warning("Unable to flush all TX buffers, timed out."); } + +u32 wl1271_tx_min_rate_get(struct wl1271 *wl) +{ + int i; + u32 rate = 0; + + if (!wl->basic_rate_set) { + WARN_ON(1); + wl->basic_rate_set = wl->conf.tx.basic_rate; + } + + for (i = 0; !rate; i++) { + if ((wl->basic_rate_set >> i) & 0x1) + rate = 1 << i; + } + + return rate; +} diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h index 903e5dc69b7a..02f07fa66e82 100644 --- a/drivers/net/wireless/wl12xx/tx.h +++ b/drivers/net/wireless/wl12xx/tx.h @@ -29,6 +29,7 @@ #define TX_HW_BLOCK_SIZE 252 #define TX_HW_MGMT_PKT_LIFETIME_TU 2000 +#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000 /* The chipset reference driver states, that the "aid" value 1 * is for infra-BSS, but is still always used */ #define TX_HW_DEFAULT_AID 1 @@ -52,8 +53,6 @@ #define TX_HW_RESULT_QUEUE_LEN_MASK 0xf #define WL1271_TX_ALIGN_TO 4 -#define WL1271_TX_ALIGN(len) (((len) + WL1271_TX_ALIGN_TO - 1) & \ - ~(WL1271_TX_ALIGN_TO - 1)) #define WL1271_TKIP_IV_SPACE 4 struct wl1271_tx_hw_descr { @@ -77,8 +76,12 @@ struct wl1271_tx_hw_descr { u8 id; /* The packet TID value (as User-Priority) */ u8 tid; - /* Identifier of the remote STA in IBSS, 1 in infra-BSS */ - u8 aid; + union { + /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */ + u8 aid; + /* AP - host link ID (HLID) */ + u8 hlid; + } __packed; u8 reserved; } __packed; @@ -146,5 +149,9 @@ void wl1271_tx_reset(struct wl1271 *wl); void wl1271_tx_flush(struct wl1271 *wl); u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); +u32 wl1271_tx_min_rate_get(struct wl1271 *wl); +u8 wl1271_tx_get_hlid(struct sk_buff *skb); +void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); +void wl1271_handle_tx_low_watermark(struct wl1271 *wl); #endif diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h index 9050dd9b62d2..86be83e25ec5 100644 --- a/drivers/net/wireless/wl12xx/wl12xx.h +++ 
b/drivers/net/wireless/wl12xx/wl12xx.h @@ -38,6 +38,13 @@ #define DRIVER_NAME "wl1271" #define DRIVER_PREFIX DRIVER_NAME ": " +/* + * FW versions support BA 11n + * versions marks x.x.x.50-60.x + */ +#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50 +#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60 + enum { DEBUG_NONE = 0, DEBUG_IRQ = BIT(0), @@ -57,6 +64,8 @@ enum { DEBUG_SDIO = BIT(14), DEBUG_FILTERS = BIT(15), DEBUG_ADHOC = BIT(16), + DEBUG_AP = BIT(17), + DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), DEBUG_ALL = ~0, }; @@ -103,17 +112,28 @@ extern u32 wl12xx_debug_level; true); \ } while (0) -#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ +#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \ CFG_BSSID_FILTER_EN | \ CFG_MC_FILTER_EN) -#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ +#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) -#define WL1271_FW_NAME "wl1271-fw.bin" -#define WL1271_NVS_NAME "wl1271-nvs.bin" +#define WL1271_DEFAULT_AP_RX_CONFIG 0 + +#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \ + CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ + CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \ + CFG_RX_ASSOC_EN) + + + +#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin" +#define WL1271_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin" + +#define WL1271_NVS_NAME "ti-connectivity/wl1271-nvs.bin" #define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) #define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff)) @@ -129,6 +149,25 @@ extern u32 wl12xx_debug_level; #define WL1271_DEFAULT_BEACON_INT 100 #define WL1271_DEFAULT_DTIM_PERIOD 1 +#define WL1271_AP_GLOBAL_HLID 0 +#define WL1271_AP_BROADCAST_HLID 1 +#define WL1271_AP_STA_HLID_START 2 + +/* + * When in AP-mode, we allow (at least) this number of mem-blocks + * to be transmitted to FW for a STA in PS-mode. Only when packets are + * present in the FW buffers it will wake the sleeping STA. We want to put + * enough packets for the driver to transmit all of its buffered data before + * the STA goes to sleep again. But we don't want to take too much mem-blocks + * as it might hurt the throughput of active STAs. + * The number of blocks (18) is enough for 2 large packets. 
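
The three HLID defines above (global 0, broadcast 1, stations from 2) are what wl1271_tx_get_hlid() earlier in this patch maps frames onto: a frame with a known destination station uses that station's link, a management frame without one goes to the global link, and everything else to the broadcast link. A small userspace model of that selection; has_sta and is_mgmt stand in for the per-frame information mac80211 provides:

#include <stdio.h>
#include <stdbool.h>

#define AP_GLOBAL_HLID      0
#define AP_BROADCAST_HLID   1
#define AP_STA_HLID_START   2

static int tx_get_hlid(bool has_sta, int sta_hlid, bool is_mgmt)
{
	if (has_sta)
		return sta_hlid;   /* per-station link, >= AP_STA_HLID_START */
	return is_mgmt ? AP_GLOBAL_HLID : AP_BROADCAST_HLID;
}

int main(void)
{
	printf("data to known sta:   hlid %d\n", tx_get_hlid(true, 3, false));
	printf("mgmt, no sta entry:  hlid %d\n", tx_get_hlid(false, 0, true));
	printf("bcast/mcast data:    hlid %d\n", tx_get_hlid(false, 0, false));
	return 0;
}
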
+ */ +#define WL1271_PS_STA_MAX_BLOCKS (2 * 9) + +#define WL1271_AP_BSS_INDEX 0 +#define WL1271_AP_DEF_INACTIV_SEC 300 +#define WL1271_AP_DEF_BEACON_EXP 20 + #define ACX_TX_DESCRIPTORS 32 #define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) @@ -161,10 +200,13 @@ struct wl1271_partition_set { struct wl1271; +#define WL12XX_NUM_FW_VER 5 + /* FIXME: I'm not sure about this structure name */ struct wl1271_chip { u32 id; - char fw_ver[21]; + char fw_ver_str[ETHTOOL_BUSINFO_LEN]; + unsigned int fw_ver[WL12XX_NUM_FW_VER]; }; struct wl1271_stats { @@ -178,8 +220,13 @@ struct wl1271_stats { #define NUM_TX_QUEUES 4 #define NUM_RX_PKT_DESC 8 -/* FW status registers */ -struct wl1271_fw_status { +#define AP_MAX_STATIONS 5 + +/* Broadcast and Global links + links to stations */ +#define AP_MAX_LINKS (AP_MAX_STATIONS + 2) + +/* FW status registers common for AP/STA */ +struct wl1271_fw_common_status { __le32 intr; u8 fw_rx_counter; u8 drv_rx_counter; @@ -188,9 +235,43 @@ struct wl1271_fw_status { __le32 rx_pkt_descs[NUM_RX_PKT_DESC]; __le32 tx_released_blks[NUM_TX_QUEUES]; __le32 fw_localtime; - __le32 padding[2]; } __packed; +/* FW status registers for AP */ +struct wl1271_fw_ap_status { + struct wl1271_fw_common_status common; + + /* Next fields valid only in AP FW */ + + /* + * A bitmap (where each bit represents a single HLID) + * to indicate if the station is in PS mode. + */ + __le32 link_ps_bitmap; + + /* Number of freed MBs per HLID */ + u8 tx_lnk_free_blks[AP_MAX_LINKS]; + u8 padding_1[1]; +} __packed; + +/* FW status registers for STA */ +struct wl1271_fw_sta_status { + struct wl1271_fw_common_status common; + + u8 tx_total; + u8 reserved1; + __le16 reserved2; +} __packed; + +struct wl1271_fw_full_status { + union { + struct wl1271_fw_common_status common; + struct wl1271_fw_sta_status sta; + struct wl1271_fw_ap_status ap; + }; +} __packed; + + struct wl1271_rx_mem_pool_addr { u32 addr; u32 addr_extra; @@ -218,6 +299,48 @@ struct wl1271_if_operations { void (*disable_irq)(struct wl1271 *wl); }; +#define MAX_NUM_KEYS 14 +#define MAX_KEY_SIZE 32 + +struct wl1271_ap_key { + u8 id; + u8 key_type; + u8 key_size; + u8 key[MAX_KEY_SIZE]; + u8 hlid; + u32 tx_seq_32; + u16 tx_seq_16; +}; + +enum wl12xx_flags { + WL1271_FLAG_STA_ASSOCIATED, + WL1271_FLAG_JOINED, + WL1271_FLAG_GPIO_POWER, + WL1271_FLAG_TX_QUEUE_STOPPED, + WL1271_FLAG_TX_PENDING, + WL1271_FLAG_IN_ELP, + WL1271_FLAG_PSM, + WL1271_FLAG_PSM_REQUESTED, + WL1271_FLAG_IRQ_RUNNING, + WL1271_FLAG_IDLE, + WL1271_FLAG_IDLE_REQUESTED, + WL1271_FLAG_PSPOLL_FAILURE, + WL1271_FLAG_STA_STATE_SENT, + WL1271_FLAG_FW_TX_BUSY, + WL1271_FLAG_AP_STARTED +}; + +struct wl1271_link { + /* AP-mode - TX queue per AC in link */ + struct sk_buff_head tx_queue[NUM_TX_QUEUES]; + + /* accounting for allocated / available TX blocks in FW */ + u8 allocated_blks; + u8 prev_freed_blks; + + u8 addr[ETH_ALEN]; +}; + struct wl1271 { struct platform_device *plat_dev; struct ieee80211_hw *hw; @@ -236,21 +359,6 @@ struct wl1271 { enum wl1271_state state; struct mutex mutex; -#define WL1271_FLAG_STA_RATES_CHANGED (0) -#define WL1271_FLAG_STA_ASSOCIATED (1) -#define WL1271_FLAG_JOINED (2) -#define WL1271_FLAG_GPIO_POWER (3) -#define WL1271_FLAG_TX_QUEUE_STOPPED (4) -#define WL1271_FLAG_IN_ELP (5) -#define WL1271_FLAG_PSM (6) -#define WL1271_FLAG_PSM_REQUESTED (7) -#define WL1271_FLAG_IRQ_PENDING (8) -#define WL1271_FLAG_IRQ_RUNNING (9) -#define WL1271_FLAG_IDLE (10) -#define WL1271_FLAG_IDLE_REQUESTED (11) -#define WL1271_FLAG_PSPOLL_FAILURE (12) -#define WL1271_FLAG_STA_STATE_SENT 
(13) -#define WL1271_FLAG_FW_TX_BUSY (14) unsigned long flags; struct wl1271_partition_set part; @@ -262,6 +370,7 @@ struct wl1271 { u8 *fw; size_t fw_len; + u8 fw_bss_type; struct wl1271_nvs_file *nvs; size_t nvs_len; @@ -295,6 +404,12 @@ struct wl1271 { struct sk_buff_head tx_queue[NUM_TX_QUEUES]; int tx_queue_count; + /* Frames received, not handled yet by mac80211 */ + struct sk_buff_head deferred_rx_queue; + + /* Frames sent, not returned yet to mac80211 */ + struct sk_buff_head deferred_tx_queue; + struct work_struct tx_work; /* Pending TX frames */ @@ -315,8 +430,8 @@ struct wl1271 { /* Intermediate buffer, used for packet aggregation */ u8 *aggr_buf; - /* The target interrupt mask */ - struct work_struct irq_work; + /* Network stack work */ + struct work_struct netstack_work; /* Hardware recovery work */ struct work_struct recovery_work; @@ -343,7 +458,6 @@ struct wl1271 { * bits 16-23 - 802.11n MCS index mask * support only 1 stream, thus only 8 bits for the MCS rates (0-7). */ - u32 sta_rate_set; u32 basic_rate_set; u32 basic_rate; u32 rate_set; @@ -378,13 +492,12 @@ struct wl1271 { int last_rssi_event; struct wl1271_stats stats; - struct dentry *rootdir; __le32 buffer_32; u32 buffer_cmd; u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; - struct wl1271_fw_status *fw_status; + struct wl1271_fw_full_status *fw_status; struct wl1271_tx_hw_res_if *tx_res_if; struct ieee80211_vif *vif; @@ -400,6 +513,41 @@ struct wl1271 { /* Most recently reported noise in dBm */ s8 noise; + + /* map for HLIDs of associated stations - when operating in AP mode */ + unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)]; + + /* recoreded keys for AP-mode - set here before AP startup */ + struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS]; + + /* bands supported by this instance of wl12xx */ + struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; + + /* RX BA constraint value */ + bool ba_support; + u8 ba_rx_bitmap; + + /* + * AP-mode - links indexed by HLID. The global and broadcast links + * are always active. 
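
ap_fw_ps_map above is the firmware-reported power-save bitmap that wl1271_tx_regulate_link() consults: a station link is pushed into high-level PS once the firmware marks it asleep and at least WL1271_PS_STA_MAX_BLOCKS TX memory blocks are already queued for it. A userspace sketch of that decision, assuming a 32-bit bitmap and illustrative helper names:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define AP_STA_HLID_START   2
#define PS_STA_MAX_BLOCKS   (2 * 9)   /* enough for two large frames */

static bool should_start_link_ps(uint32_t fw_ps_map, int hlid,
				 unsigned int allocated_blks)
{
	if (hlid < AP_STA_HLID_START)        /* never for global/broadcast links */
		return false;
	if (!(fw_ps_map & (1u << hlid)))     /* firmware says the STA is awake */
		return false;
	return allocated_blks >= PS_STA_MAX_BLOCKS;
}

int main(void)
{
	uint32_t fw_ps_map = 1u << 3;        /* firmware: station on HLID 3 sleeps */

	printf("hlid 3, 20 blocks: %d\n", should_start_link_ps(fw_ps_map, 3, 20));
	printf("hlid 3,  4 blocks: %d\n", should_start_link_ps(fw_ps_map, 3, 4));
	printf("hlid 2, 20 blocks: %d\n", should_start_link_ps(fw_ps_map, 2, 20));
	return 0;
}
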
+ */ + struct wl1271_link links[AP_MAX_LINKS]; + + /* the hlid of the link where the last transmitted skb came from */ + int last_tx_hlid; + + /* AP-mode - a bitmap of links currently in PS mode according to FW */ + u32 ap_fw_ps_map; + + /* AP-mode - a bitmap of links currently in PS mode in mac80211 */ + unsigned long ap_ps_map; + + /* Quirks of specific hardware revisions */ + unsigned int quirks; +}; + +struct wl1271_station { + u8 hlid; }; int wl1271_plt_start(struct wl1271 *wl); @@ -414,6 +562,8 @@ int wl1271_plt_stop(struct wl1271 *wl); #define WL1271_TX_QUEUE_LOW_WATERMARK 10 #define WL1271_TX_QUEUE_HIGH_WATERMARK 25 +#define WL1271_DEFERRED_QUEUE_LIMIT 64 + /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power on in case is has been shut down shortly before */ #define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */ @@ -423,4 +573,9 @@ int wl1271_plt_stop(struct wl1271 *wl); #define HW_BG_RATES_MASK 0xffff #define HW_HT_RATES_OFFSET 16 +/* Quirks */ + +/* Each RX/TX transaction requires an end-of-transaction transfer */ +#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0) + #endif diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h index be21032f4dc1..18fe542360f2 100644 --- a/drivers/net/wireless/wl12xx/wl12xx_80211.h +++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h @@ -55,7 +55,6 @@ /* This really should be 8, but not for our firmware */ #define MAX_SUPPORTED_RATES 32 -#define COUNTRY_STRING_LEN 3 #define MAX_COUNTRY_TRIPLETS 32 /* Headers */ @@ -99,7 +98,7 @@ struct country_triplet { struct wl12xx_ie_country { struct wl12xx_ie_header header; - u8 country_string[COUNTRY_STRING_LEN]; + u8 country_string[IEEE80211_COUNTRY_STRING_LEN]; struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; } __packed; @@ -138,13 +137,13 @@ struct wl12xx_arp_rsp_template { struct ieee80211_hdr_3addr hdr; u8 llc_hdr[sizeof(rfc1042_header)]; - u16 llc_type; + __be16 llc_type; struct arphdr arp_hdr; u8 sender_hw[ETH_ALEN]; - u32 sender_ip; + __be32 sender_ip; u8 target_hw[ETH_ALEN]; - u32 target_ip; + __be32 target_ip; } __packed; @@ -160,4 +159,9 @@ struct wl12xx_probe_resp_template { struct wl12xx_ie_country country; } __packed; +struct wl12xx_disconn_template { + struct ieee80211_header header; + __le16 disconn_reason; +} __packed; + #endif diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c index 6a9b66051cf7..a73a305d3cba 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.c +++ b/drivers/net/wireless/zd1211rw/zd_chip.c @@ -108,25 +108,17 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr { int r; int i; - zd_addr_t *a16; - u16 *v16; + zd_addr_t a16[USB_MAX_IOREAD32_COUNT * 2]; + u16 v16[USB_MAX_IOREAD32_COUNT * 2]; unsigned int count16; if (count > USB_MAX_IOREAD32_COUNT) return -EINVAL; - /* Allocate a single memory block for values and addresses. */ - count16 = 2*count; - /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */ - a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), - GFP_KERNEL); - if (!a16) { - dev_dbg_f(zd_chip_dev(chip), - "error ENOMEM in allocation of a16\n"); - r = -ENOMEM; - goto out; - } - v16 = (u16 *)(a16 + count16); + /* Use stack for values and addresses. 
*/ + count16 = 2 * count; + BUG_ON(count16 * sizeof(zd_addr_t) > sizeof(a16)); + BUG_ON(count16 * sizeof(u16) > sizeof(v16)); for (i = 0; i < count; i++) { int j = 2*i; @@ -139,7 +131,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr if (r) { dev_dbg_f(zd_chip_dev(chip), "error: zd_ioread16v_locked. Error number %d\n", r); - goto out; + return r; } for (i = 0; i < count; i++) { @@ -147,18 +139,19 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr values[i] = (v16[j] << 16) | v16[j+1]; } -out: - kfree((void *)a16); - return r; + return 0; } -int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, - unsigned int count) +static int _zd_iowrite32v_async_locked(struct zd_chip *chip, + const struct zd_ioreq32 *ioreqs, + unsigned int count) { int i, j, r; - struct zd_ioreq16 *ioreqs16; + struct zd_ioreq16 ioreqs16[USB_MAX_IOWRITE32_COUNT * 2]; unsigned int count16; + /* Use stack for values and addresses. */ + ZD_ASSERT(mutex_is_locked(&chip->mutex)); if (count == 0) @@ -166,15 +159,8 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, if (count > USB_MAX_IOWRITE32_COUNT) return -EINVAL; - /* Allocate a single memory block for values and addresses. */ - count16 = 2*count; - ioreqs16 = kmalloc(count16 * sizeof(struct zd_ioreq16), GFP_KERNEL); - if (!ioreqs16) { - r = -ENOMEM; - dev_dbg_f(zd_chip_dev(chip), - "error %d in ioreqs16 allocation\n", r); - goto out; - } + count16 = 2 * count; + BUG_ON(count16 * sizeof(struct zd_ioreq16) > sizeof(ioreqs16)); for (i = 0; i < count; i++) { j = 2*i; @@ -185,18 +171,30 @@ int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, ioreqs16[j+1].addr = ioreqs[i].addr; } - r = zd_usb_iowrite16v(&chip->usb, ioreqs16, count16); + r = zd_usb_iowrite16v_async(&chip->usb, ioreqs16, count16); #ifdef DEBUG if (r) { dev_dbg_f(zd_chip_dev(chip), "error %d in zd_usb_write16v\n", r); } #endif /* DEBUG */ -out: - kfree(ioreqs16); return r; } +int _zd_iowrite32v_locked(struct zd_chip *chip, const struct zd_ioreq32 *ioreqs, + unsigned int count) +{ + int r; + + zd_usb_iowrite16v_async_start(&chip->usb); + r = _zd_iowrite32v_async_locked(chip, ioreqs, count); + if (r) { + zd_usb_iowrite16v_async_end(&chip->usb, 0); + return r; + } + return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */); +} + int zd_iowrite16a_locked(struct zd_chip *chip, const struct zd_ioreq16 *ioreqs, unsigned int count) { @@ -204,6 +202,8 @@ int zd_iowrite16a_locked(struct zd_chip *chip, unsigned int i, j, t, max; ZD_ASSERT(mutex_is_locked(&chip->mutex)); + zd_usb_iowrite16v_async_start(&chip->usb); + for (i = 0; i < count; i += j + t) { t = 0; max = count-i; @@ -216,8 +216,9 @@ int zd_iowrite16a_locked(struct zd_chip *chip, } } - r = zd_usb_iowrite16v(&chip->usb, &ioreqs[i], j); + r = zd_usb_iowrite16v_async(&chip->usb, &ioreqs[i], j); if (r) { + zd_usb_iowrite16v_async_end(&chip->usb, 0); dev_dbg_f(zd_chip_dev(chip), "error zd_usb_iowrite16v. Error number %d\n", r); @@ -225,7 +226,7 @@ int zd_iowrite16a_locked(struct zd_chip *chip, } } - return 0; + return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */); } /* Writes a variable number of 32 bit registers. 
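
zd_ioread32v_locked() above builds each 32-bit register read out of two 16-bit transfers and recombines them high word first ("values[i] = (v16[j] << 16) | v16[j+1]"), now using fixed-size on-stack arrays instead of a per-call kmalloc(). A userspace sketch of the recombination and the static sizing; MAX_IOREAD32_COUNT is only a placeholder for the driver's USB limit:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

#define MAX_IOREAD32_COUNT 8   /* illustrative bound */

static void combine_reads(const uint16_t *v16, uint32_t *values,
			  unsigned int count)
{
	unsigned int i;

	assert(count <= MAX_IOREAD32_COUNT);   /* stands in for the BUG_ON sizing check */
	for (i = 0; i < count; i++) {
		unsigned int j = 2 * i;

		values[i] = ((uint32_t)v16[j] << 16) | v16[j + 1];   /* high word first */
	}
}

int main(void)
{
	uint16_t v16[2 * MAX_IOREAD32_COUNT] = { 0xdead, 0xbeef, 0x1234, 0x5678 };
	uint32_t values[MAX_IOREAD32_COUNT];

	combine_reads(v16, values, 2);
	printf("0x%08x 0x%08x\n", (unsigned)values[0], (unsigned)values[1]);
	return 0;
}
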
The functions will split @@ -238,6 +239,8 @@ int zd_iowrite32a_locked(struct zd_chip *chip, int r; unsigned int i, j, t, max; + zd_usb_iowrite16v_async_start(&chip->usb); + for (i = 0; i < count; i += j + t) { t = 0; max = count-i; @@ -250,8 +253,9 @@ int zd_iowrite32a_locked(struct zd_chip *chip, } } - r = _zd_iowrite32v_locked(chip, &ioreqs[i], j); + r = _zd_iowrite32v_async_locked(chip, &ioreqs[i], j); if (r) { + zd_usb_iowrite16v_async_end(&chip->usb, 0); dev_dbg_f(zd_chip_dev(chip), "error _zd_iowrite32v_locked." " Error number %d\n", r); @@ -259,7 +263,7 @@ int zd_iowrite32a_locked(struct zd_chip *chip, } } - return 0; + return zd_usb_iowrite16v_async_end(&chip->usb, 50 /* ms */); } int zd_ioread16(struct zd_chip *chip, zd_addr_t addr, u16 *value) @@ -370,16 +374,12 @@ error: return r; } -/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and - * CR_MAC_ADDR_P2 must be overwritten - */ -int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) +static int zd_write_mac_addr_common(struct zd_chip *chip, const u8 *mac_addr, + const struct zd_ioreq32 *in_reqs, + const char *type) { int r; - struct zd_ioreq32 reqs[2] = { - [0] = { .addr = CR_MAC_ADDR_P1 }, - [1] = { .addr = CR_MAC_ADDR_P2 }, - }; + struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]}; if (mac_addr) { reqs[0].value = (mac_addr[3] << 24) @@ -388,9 +388,9 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) | mac_addr[0]; reqs[1].value = (mac_addr[5] << 8) | mac_addr[4]; - dev_dbg_f(zd_chip_dev(chip), "mac addr %pM\n", mac_addr); + dev_dbg_f(zd_chip_dev(chip), "%s addr %pM\n", type, mac_addr); } else { - dev_dbg_f(zd_chip_dev(chip), "set NULL mac\n"); + dev_dbg_f(zd_chip_dev(chip), "set NULL %s\n", type); } mutex_lock(&chip->mutex); @@ -399,6 +399,29 @@ int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) return r; } +/* MAC address: if custom mac addresses are to be used CR_MAC_ADDR_P1 and + * CR_MAC_ADDR_P2 must be overwritten + */ +int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) +{ + static const struct zd_ioreq32 reqs[2] = { + [0] = { .addr = CR_MAC_ADDR_P1 }, + [1] = { .addr = CR_MAC_ADDR_P2 }, + }; + + return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac"); +} + +int zd_write_bssid(struct zd_chip *chip, const u8 *bssid) +{ + static const struct zd_ioreq32 reqs[2] = { + [0] = { .addr = CR_BSSID_P1 }, + [1] = { .addr = CR_BSSID_P2 }, + }; + + return zd_write_mac_addr_common(chip, bssid, reqs, "bssid"); +} + int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain) { int r; @@ -849,11 +872,12 @@ static int get_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) { struct zd_ioreq32 reqs[3]; + u16 b_interval = s->beacon_interval & 0xffff; - if (s->beacon_interval <= 5) - s->beacon_interval = 5; - if (s->pre_tbtt < 4 || s->pre_tbtt >= s->beacon_interval) - s->pre_tbtt = s->beacon_interval - 1; + if (b_interval <= 5) + b_interval = 5; + if (s->pre_tbtt < 4 || s->pre_tbtt >= b_interval) + s->pre_tbtt = b_interval - 1; if (s->atim_wnd_period >= s->pre_tbtt) s->atim_wnd_period = s->pre_tbtt - 1; @@ -862,31 +886,57 @@ static int set_aw_pt_bi(struct zd_chip *chip, struct aw_pt_bi *s) reqs[1].addr = CR_PRE_TBTT; reqs[1].value = s->pre_tbtt; reqs[2].addr = CR_BCN_INTERVAL; - reqs[2].value = s->beacon_interval; + reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval; return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); } -static int set_beacon_interval(struct zd_chip *chip, u32 
interval) +static int set_beacon_interval(struct zd_chip *chip, u16 interval, + u8 dtim_period, int type) { int r; struct aw_pt_bi s; + u32 b_interval, mode_flag; ZD_ASSERT(mutex_is_locked(&chip->mutex)); + + if (interval > 0) { + switch (type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_MESH_POINT: + mode_flag = BCN_MODE_IBSS; + break; + case NL80211_IFTYPE_AP: + mode_flag = BCN_MODE_AP; + break; + default: + mode_flag = 0; + break; + } + } else { + dtim_period = 0; + mode_flag = 0; + } + + b_interval = mode_flag | (dtim_period << 16) | interval; + + r = zd_iowrite32_locked(chip, b_interval, CR_BCN_INTERVAL); + if (r) + return r; r = get_aw_pt_bi(chip, &s); if (r) return r; - s.beacon_interval = interval; return set_aw_pt_bi(chip, &s); } -int zd_set_beacon_interval(struct zd_chip *chip, u32 interval) +int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period, + int type) { int r; mutex_lock(&chip->mutex); - r = set_beacon_interval(chip, interval); + r = set_beacon_interval(chip, interval, dtim_period, type); mutex_unlock(&chip->mutex); return r; } @@ -905,7 +955,7 @@ static int hw_init(struct zd_chip *chip) if (r) return r; - return set_beacon_interval(chip, 100); + return set_beacon_interval(chip, 100, 0, NL80211_IFTYPE_UNSPECIFIED); } static zd_addr_t fw_reg_addr(struct zd_chip *chip, u16 offset) @@ -1407,6 +1457,9 @@ void zd_chip_disable_int(struct zd_chip *chip) mutex_lock(&chip->mutex); zd_usb_disable_int(&chip->usb); mutex_unlock(&chip->mutex); + + /* cancel pending interrupt work */ + cancel_work_sync(&zd_chip_to_mac(chip)->process_intr); } int zd_chip_enable_rxtx(struct zd_chip *chip) @@ -1416,6 +1469,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip) mutex_lock(&chip->mutex); zd_usb_enable_tx(&chip->usb); r = zd_usb_enable_rx(&chip->usb); + zd_tx_watchdog_enable(&chip->usb); mutex_unlock(&chip->mutex); return r; } @@ -1423,6 +1477,7 @@ int zd_chip_enable_rxtx(struct zd_chip *chip) void zd_chip_disable_rxtx(struct zd_chip *chip) { mutex_lock(&chip->mutex); + zd_tx_watchdog_disable(&chip->usb); zd_usb_disable_rx(&chip->usb); zd_usb_disable_tx(&chip->usb); mutex_unlock(&chip->mutex); diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h index f8bbf7d302ae..14e4402a6111 100644 --- a/drivers/net/wireless/zd1211rw/zd_chip.h +++ b/drivers/net/wireless/zd1211rw/zd_chip.h @@ -546,6 +546,7 @@ enum { #define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \ RX_FILTER_CFEND | RX_FILTER_CFACK) +#define BCN_MODE_AP 0x1000000 #define BCN_MODE_IBSS 0x2000000 /* Monitor mode sets filter to 0xfffff */ @@ -881,6 +882,7 @@ static inline u8 _zd_chip_get_channel(struct zd_chip *chip) u8 zd_chip_get_channel(struct zd_chip *chip); int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain); int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr); +int zd_write_bssid(struct zd_chip *chip, const u8 *bssid); int zd_chip_switch_radio_on(struct zd_chip *chip); int zd_chip_switch_radio_off(struct zd_chip *chip); int zd_chip_enable_int(struct zd_chip *chip); @@ -920,7 +922,8 @@ enum led_status { int zd_chip_control_leds(struct zd_chip *chip, enum led_status status); -int zd_set_beacon_interval(struct zd_chip *chip, u32 interval); +int zd_set_beacon_interval(struct zd_chip *chip, u16 interval, u8 dtim_period, + int type); static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval) { diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h index 6ac597ffd3b9..5463ca9ebc01 100644 --- 
a/drivers/net/wireless/zd1211rw/zd_def.h +++ b/drivers/net/wireless/zd1211rw/zd_def.h @@ -45,7 +45,7 @@ typedef u16 __nocast zd_addr_t; #ifdef DEBUG # define ZD_ASSERT(x) \ do { \ - if (!(x)) { \ + if (unlikely(!(x))) { \ pr_debug("%s:%d ASSERT %s VIOLATED!\n", \ __FILE__, __LINE__, __stringify(x)); \ dump_stack(); \ diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 6107304cb94c..5037c8b2b415 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -138,6 +138,12 @@ static const struct ieee80211_channel zd_channels[] = { static void housekeeping_init(struct zd_mac *mac); static void housekeeping_enable(struct zd_mac *mac); static void housekeeping_disable(struct zd_mac *mac); +static void beacon_init(struct zd_mac *mac); +static void beacon_enable(struct zd_mac *mac); +static void beacon_disable(struct zd_mac *mac); +static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble); +static int zd_mac_config_beacon(struct ieee80211_hw *hw, + struct sk_buff *beacon); static int zd_reg2alpha2(u8 regdomain, char *alpha2) { @@ -231,6 +237,26 @@ static int set_rx_filter(struct zd_mac *mac) return zd_iowrite32(&mac->chip, CR_RX_FILTER, filter); } +static int set_mac_and_bssid(struct zd_mac *mac) +{ + int r; + + if (!mac->vif) + return -1; + + r = zd_write_mac_addr(&mac->chip, mac->vif->addr); + if (r) + return r; + + /* Vendor driver after setting MAC either sets BSSID for AP or + * filter for other modes. + */ + if (mac->type != NL80211_IFTYPE_AP) + return set_rx_filter(mac); + else + return zd_write_bssid(&mac->chip, mac->vif->addr); +} + static int set_mc_hash(struct zd_mac *mac) { struct zd_mc_hash hash; @@ -238,7 +264,7 @@ static int set_mc_hash(struct zd_mac *mac) return zd_chip_set_multicast_hash(&mac->chip, &hash); } -static int zd_op_start(struct ieee80211_hw *hw) +int zd_op_start(struct ieee80211_hw *hw) { struct zd_mac *mac = zd_hw_mac(hw); struct zd_chip *chip = &mac->chip; @@ -275,6 +301,8 @@ static int zd_op_start(struct ieee80211_hw *hw) goto disable_rxtx; housekeeping_enable(mac); + beacon_enable(mac); + set_bit(ZD_DEVICE_RUNNING, &mac->flags); return 0; disable_rxtx: zd_chip_disable_rxtx(chip); @@ -286,19 +314,22 @@ out: return r; } -static void zd_op_stop(struct ieee80211_hw *hw) +void zd_op_stop(struct ieee80211_hw *hw) { struct zd_mac *mac = zd_hw_mac(hw); struct zd_chip *chip = &mac->chip; struct sk_buff *skb; struct sk_buff_head *ack_wait_queue = &mac->ack_wait_queue; + clear_bit(ZD_DEVICE_RUNNING, &mac->flags); + /* The order here deliberately is a little different from the open() * method, since we need to make sure there is no opportunity for RX * frames to be processed by mac80211 after we have stopped it. 
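
The reworked set_beacon_interval() just above packs the whole CR_BCN_INTERVAL register in one go: interval in the low 16 bits, DTIM period in bits 16-23, and a mode flag (BCN_MODE_IBSS or the new BCN_MODE_AP) selecting the beaconing mode, with everything cleared when the interval is zero. A userspace model of that packing; the iftype enum stands in for the NL80211_IFTYPE_* constants:

#include <stdio.h>
#include <stdint.h>

#define BCN_MODE_AP    0x1000000
#define BCN_MODE_IBSS  0x2000000

enum iftype { IF_STATION, IF_ADHOC, IF_MESH_POINT, IF_AP };

static uint32_t pack_bcn_interval(uint16_t interval, uint8_t dtim_period,
				  enum iftype type)
{
	uint32_t mode_flag = 0;

	if (interval == 0)
		return 0;                       /* beaconing disabled */

	if (type == IF_ADHOC || type == IF_MESH_POINT)
		mode_flag = BCN_MODE_IBSS;
	else if (type == IF_AP)
		mode_flag = BCN_MODE_AP;

	return mode_flag | ((uint32_t)dtim_period << 16) | interval;
}

int main(void)
{
	printf("AP, 100 TU, DTIM 2:  0x%08x\n",
	       (unsigned)pack_bcn_interval(100, 2, IF_AP));
	printf("IBSS, 100 TU:        0x%08x\n",
	       (unsigned)pack_bcn_interval(100, 0, IF_ADHOC));
	printf("disabled:            0x%08x\n",
	       (unsigned)pack_bcn_interval(0, 2, IF_AP));
	return 0;
}
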
*/ zd_chip_disable_rxtx(chip); + beacon_disable(mac); housekeeping_disable(mac); flush_workqueue(zd_workqueue); @@ -311,6 +342,68 @@ static void zd_op_stop(struct ieee80211_hw *hw) dev_kfree_skb_any(skb); } +int zd_restore_settings(struct zd_mac *mac) +{ + struct sk_buff *beacon; + struct zd_mc_hash multicast_hash; + unsigned int short_preamble; + int r, beacon_interval, beacon_period; + u8 channel; + + dev_dbg_f(zd_mac_dev(mac), "\n"); + + spin_lock_irq(&mac->lock); + multicast_hash = mac->multicast_hash; + short_preamble = mac->short_preamble; + beacon_interval = mac->beacon.interval; + beacon_period = mac->beacon.period; + channel = mac->channel; + spin_unlock_irq(&mac->lock); + + r = set_mac_and_bssid(mac); + if (r < 0) { + dev_dbg_f(zd_mac_dev(mac), "set_mac_and_bssid failed, %d\n", r); + return r; + } + + r = zd_chip_set_channel(&mac->chip, channel); + if (r < 0) { + dev_dbg_f(zd_mac_dev(mac), "zd_chip_set_channel failed, %d\n", + r); + return r; + } + + set_rts_cts(mac, short_preamble); + + r = zd_chip_set_multicast_hash(&mac->chip, &multicast_hash); + if (r < 0) { + dev_dbg_f(zd_mac_dev(mac), + "zd_chip_set_multicast_hash failed, %d\n", r); + return r; + } + + if (mac->type == NL80211_IFTYPE_MESH_POINT || + mac->type == NL80211_IFTYPE_ADHOC || + mac->type == NL80211_IFTYPE_AP) { + if (mac->vif != NULL) { + beacon = ieee80211_beacon_get(mac->hw, mac->vif); + if (beacon) { + zd_mac_config_beacon(mac->hw, beacon); + kfree_skb(beacon); + } + } + + zd_set_beacon_interval(&mac->chip, beacon_interval, + beacon_period, mac->type); + + spin_lock_irq(&mac->lock); + mac->beacon.last_update = jiffies; + spin_unlock_irq(&mac->lock); + } + + return 0; +} + /** * zd_mac_tx_status - reports tx status of a packet if required * @hw - a &struct ieee80211_hw pointer @@ -574,64 +667,120 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs, static int zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon) { struct zd_mac *mac = zd_hw_mac(hw); - int r; + int r, ret, num_cmds, req_pos = 0; u32 tmp, j = 0; /* 4 more bytes for tail CRC */ u32 full_len = beacon->len + 4; + unsigned long end_jiffies, message_jiffies; + struct zd_ioreq32 *ioreqs; - r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0); + /* Alloc memory for full beacon write at once. 
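
The zd_mac_config_beacon() rewrite that follows replaces one USB control transfer per beacon byte with a single precomputed array of register writes: one FIFO length write, an extra length register write on ZD1211B, one FIFO write per beacon byte and four zeros for the trailing CRC, checked against num_cmds exactly as the BUG_ON() does. A userspace sketch of building that array, with placeholder register names:

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

enum { REG_BCN_FIFO = 1, REG_BCN_LENGTH = 2 };   /* placeholders, not real addresses */

struct ioreq32 {
	int addr;
	uint32_t value;
};

static struct ioreq32 *build_beacon_cmds(const uint8_t *beacon, size_t len,
					 bool is_zd1211b, size_t *num_cmds)
{
	size_t full_len = len + 4;                 /* payload + 4-byte CRC slot */
	size_t n = 1 + (is_zd1211b ? 1 : 0) + full_len;
	struct ioreq32 *cmds = malloc(n * sizeof(*cmds));
	size_t pos = 0, i;

	if (!cmds)
		return NULL;

	cmds[pos++] = (struct ioreq32){ REG_BCN_FIFO, (uint32_t)full_len - 1 };
	if (is_zd1211b)
		cmds[pos++] = (struct ioreq32){ REG_BCN_LENGTH, (uint32_t)full_len - 1 };
	for (i = 0; i < len; i++)
		cmds[pos++] = (struct ioreq32){ REG_BCN_FIFO, beacon[i] };
	for (i = 0; i < 4; i++)
		cmds[pos++] = (struct ioreq32){ REG_BCN_FIFO, 0 };

	assert(pos == n);                          /* mirrors the BUG_ON() */
	*num_cmds = n;
	return cmds;
}

int main(void)
{
	uint8_t beacon[] = { 0x80, 0x00, 0x00, 0x00 };
	size_t n;
	struct ioreq32 *cmds = build_beacon_cmds(beacon, sizeof(beacon), true, &n);

	printf("built %zu register writes\n", n);   /* 1 + 1 + 4 + 4 = 10 */
	free(cmds);
	return 0;
}
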
*/ + num_cmds = 1 + zd_chip_is_zd1211b(&mac->chip) + full_len; + ioreqs = kmalloc(num_cmds * sizeof(struct zd_ioreq32), GFP_KERNEL); + if (!ioreqs) + return -ENOMEM; + + mutex_lock(&mac->chip.mutex); + + r = zd_iowrite32_locked(&mac->chip, 0, CR_BCN_FIFO_SEMAPHORE); if (r < 0) - return r; - r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp); + goto out; + r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE); if (r < 0) - return r; + goto release_sema; + end_jiffies = jiffies + HZ / 2; /*~500ms*/ + message_jiffies = jiffies + HZ / 10; /*~100ms*/ while (tmp & 0x2) { - r = zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp); + r = zd_ioread32_locked(&mac->chip, &tmp, CR_BCN_FIFO_SEMAPHORE); if (r < 0) - return r; - if ((++j % 100) == 0) { - printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n"); - if (j >= 500) { - printk(KERN_ERR "Giving up beacon config.\n"); - return -ETIMEDOUT; + goto release_sema; + if (time_is_before_eq_jiffies(message_jiffies)) { + message_jiffies = jiffies + HZ / 10; + dev_err(zd_mac_dev(mac), + "CR_BCN_FIFO_SEMAPHORE not ready\n"); + if (time_is_before_eq_jiffies(end_jiffies)) { + dev_err(zd_mac_dev(mac), + "Giving up beacon config.\n"); + r = -ETIMEDOUT; + goto reset_device; } } - msleep(1); + msleep(20); } - r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1); - if (r < 0) - return r; + ioreqs[req_pos].addr = CR_BCN_FIFO; + ioreqs[req_pos].value = full_len - 1; + req_pos++; if (zd_chip_is_zd1211b(&mac->chip)) { - r = zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1); - if (r < 0) - return r; + ioreqs[req_pos].addr = CR_BCN_LENGTH; + ioreqs[req_pos].value = full_len - 1; + req_pos++; } for (j = 0 ; j < beacon->len; j++) { - r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, - *((u8 *)(beacon->data + j))); - if (r < 0) - return r; + ioreqs[req_pos].addr = CR_BCN_FIFO; + ioreqs[req_pos].value = *((u8 *)(beacon->data + j)); + req_pos++; } for (j = 0; j < 4; j++) { - r = zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0); - if (r < 0) - return r; + ioreqs[req_pos].addr = CR_BCN_FIFO; + ioreqs[req_pos].value = 0x0; + req_pos++; } - r = zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1); - if (r < 0) - return r; + BUG_ON(req_pos != num_cmds); + + r = zd_iowrite32a_locked(&mac->chip, ioreqs, num_cmds); + +release_sema: + /* + * Try very hard to release device beacon semaphore, as otherwise + * device/driver can be left in unusable state. 
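
The semaphore handling above switches from counting loop iterations to real deadlines: poll roughly every 20 ms, complain every ~100 ms, and give up (or stop retrying the release) after about half a second. A userspace model of that pattern using CLOCK_MONOTONIC; read_semaphore() is a stand-in for the CR_BCN_FIFO_SEMAPHORE read and always reports busy so the timeout path is exercised:

#include <stdio.h>
#include <time.h>
#include <errno.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int read_semaphore(void)
{
	return 0x2;                          /* pretend the FIFO stays busy */
}

static int wait_for_semaphore(void)
{
	long end = now_ms() + 500;           /* ~HZ/2 in the driver */
	long next_msg = now_ms() + 100;      /* ~HZ/10 */

	while (read_semaphore() & 0x2) {
		if (now_ms() >= next_msg) {
			next_msg = now_ms() + 100;
			fprintf(stderr, "beacon semaphore not ready\n");
			if (now_ms() >= end) {
				fprintf(stderr, "giving up\n");
				return -ETIMEDOUT;
			}
		}
		nanosleep(&(struct timespec){ .tv_nsec = 20 * 1000000L }, NULL);
	}
	return 0;
}

int main(void)
{
	printf("wait_for_semaphore() = %d\n", wait_for_semaphore());
	return 0;
}
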
+ */ + end_jiffies = jiffies + HZ / 2; /*~500ms*/ + ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE); + while (ret < 0) { + if (time_is_before_eq_jiffies(end_jiffies)) { + ret = -ETIMEDOUT; + break; + } + + msleep(20); + ret = zd_iowrite32_locked(&mac->chip, 1, CR_BCN_FIFO_SEMAPHORE); + } + + if (ret < 0) + dev_err(zd_mac_dev(mac), "Could not release " + "CR_BCN_FIFO_SEMAPHORE!\n"); + if (r < 0 || ret < 0) { + if (r >= 0) + r = ret; + goto out; + } /* 802.11b/g 2.4G CCK 1Mb * 802.11a, not yet implemented, uses different values (see GPL vendor * driver) */ - return zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 | - (full_len << 19)); + r = zd_iowrite32_locked(&mac->chip, 0x00000400 | (full_len << 19), + CR_BCN_PLCP_CFG); +out: + mutex_unlock(&mac->chip.mutex); + kfree(ioreqs); + return r; + +reset_device: + mutex_unlock(&mac->chip.mutex); + kfree(ioreqs); + + /* semaphore stuck, reset device to avoid fw freeze later */ + dev_warn(zd_mac_dev(mac), "CR_BCN_FIFO_SEMAPHORE stuck, " + "reseting device..."); + usb_queue_reset_device(mac->chip.usb.intf); + + return r; } static int fill_ctrlset(struct zd_mac *mac, @@ -701,7 +850,7 @@ static int fill_ctrlset(struct zd_mac *mac, * control block of the skbuff will be initialized. If necessary the incoming * mac80211 queues will be stopped. */ -static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct zd_mac *mac = zd_hw_mac(hw); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -716,11 +865,10 @@ static int zd_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) r = zd_usb_tx(&mac->chip.usb, skb); if (r) goto fail; - return 0; + return; fail: dev_kfree_skb(skb); - return 0; } /** @@ -779,6 +927,13 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr, mac->ack_pending = 1; mac->ack_signal = stats->signal; + + /* Prevent pending tx-packet on AP-mode */ + if (mac->type == NL80211_IFTYPE_AP) { + skb = __skb_dequeue(q); + zd_mac_tx_status(hw, skb, mac->ack_signal, NULL); + mac->ack_pending = 0; + } } spin_unlock_irqrestore(&q->lock, flags); @@ -882,13 +1037,16 @@ static int zd_op_add_interface(struct ieee80211_hw *hw, case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP: mac->type = vif->type; break; default: return -EOPNOTSUPP; } - return zd_write_mac_addr(&mac->chip, vif->addr); + mac->vif = vif; + + return set_mac_and_bssid(mac); } static void zd_op_remove_interface(struct ieee80211_hw *hw, @@ -896,7 +1054,8 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw, { struct zd_mac *mac = zd_hw_mac(hw); mac->type = NL80211_IFTYPE_UNSPECIFIED; - zd_set_beacon_interval(&mac->chip, 0); + mac->vif = NULL; + zd_set_beacon_interval(&mac->chip, 0, 0, NL80211_IFTYPE_UNSPECIFIED); zd_write_mac_addr(&mac->chip, NULL); } @@ -905,49 +1064,67 @@ static int zd_op_config(struct ieee80211_hw *hw, u32 changed) struct zd_mac *mac = zd_hw_mac(hw); struct ieee80211_conf *conf = &hw->conf; + spin_lock_irq(&mac->lock); + mac->channel = conf->channel->hw_value; + spin_unlock_irq(&mac->lock); + return zd_chip_set_channel(&mac->chip, conf->channel->hw_value); } -static void zd_process_intr(struct work_struct *work) +static void zd_beacon_done(struct zd_mac *mac) { - u16 int_status; - struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); + struct sk_buff *skb, *beacon; - int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer+4)); - if (int_status & 
INT_CFG_NEXT_BCN) - dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n"); - else - dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n"); - - zd_chip_enable_hwint(&mac->chip); -} + if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags)) + return; + if (!mac->vif || mac->vif->type != NL80211_IFTYPE_AP) + return; + /* + * Send out buffered broad- and multicast frames. + */ + while (!ieee80211_queue_stopped(mac->hw, 0)) { + skb = ieee80211_get_buffered_bc(mac->hw, mac->vif); + if (!skb) + break; + zd_op_tx(mac->hw, skb); + } -static void set_multicast_hash_handler(struct work_struct *work) -{ - struct zd_mac *mac = - container_of(work, struct zd_mac, set_multicast_hash_work); - struct zd_mc_hash hash; + /* + * Fetch next beacon so that tim_count is updated. + */ + beacon = ieee80211_beacon_get(mac->hw, mac->vif); + if (beacon) { + zd_mac_config_beacon(mac->hw, beacon); + kfree_skb(beacon); + } spin_lock_irq(&mac->lock); - hash = mac->multicast_hash; + mac->beacon.last_update = jiffies; spin_unlock_irq(&mac->lock); - - zd_chip_set_multicast_hash(&mac->chip, &hash); } -static void set_rx_filter_handler(struct work_struct *work) +static void zd_process_intr(struct work_struct *work) { - struct zd_mac *mac = - container_of(work, struct zd_mac, set_rx_filter_work); - int r; + u16 int_status; + unsigned long flags; + struct zd_mac *mac = container_of(work, struct zd_mac, process_intr); - dev_dbg_f(zd_mac_dev(mac), "\n"); - r = set_rx_filter(mac); - if (r) - dev_err(zd_mac_dev(mac), "set_rx_filter_handler error %d\n", r); + spin_lock_irqsave(&mac->lock, flags); + int_status = le16_to_cpu(*(__le16 *)(mac->intr_buffer + 4)); + spin_unlock_irqrestore(&mac->lock, flags); + + if (int_status & INT_CFG_NEXT_BCN) { + /*dev_dbg_f_limit(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");*/ + zd_beacon_done(mac); + } else { + dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n"); + } + + zd_chip_enable_hwint(&mac->chip); } + static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { @@ -979,6 +1156,7 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw, }; struct zd_mac *mac = zd_hw_mac(hw); unsigned long flags; + int r; /* Only deal with supported flags */ changed_flags &= SUPPORTED_FIF_FLAGS; @@ -1000,11 +1178,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw, mac->multicast_hash = hash; spin_unlock_irqrestore(&mac->lock, flags); - /* XXX: these can be called here now, can sleep now! */ - queue_work(zd_workqueue, &mac->set_multicast_hash_work); + zd_chip_set_multicast_hash(&mac->chip, &hash); - if (changed_flags & FIF_CONTROL) - queue_work(zd_workqueue, &mac->set_rx_filter_work); + if (changed_flags & FIF_CONTROL) { + r = set_rx_filter(mac); + if (r) + dev_err(zd_mac_dev(mac), "set_rx_filter error %d\n", r); + } /* no handling required for FIF_OTHER_BSS as we don't currently * do BSSID filtering */ @@ -1016,20 +1196,9 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw, * time. 
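
zd_process_intr() above now snapshots the interrupt status under the lock before acting on it; the status itself is a little-endian 16-bit word at offset 4 of the raw USB interrupt buffer, tested for the next-beacon bit. A tiny userspace model of that decode; the INT_NEXT_BCN value is a placeholder, not the real INT_CFG_NEXT_BCN constant:

#include <stdio.h>
#include <stdint.h>

#define STATUS_OFFSET  4        /* same offset as intr_buffer + 4 in the driver */
#define INT_NEXT_BCN   0x0001   /* placeholder bit value */

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));   /* endian-independent decode */
}

int main(void)
{
	/* Fake interrupt buffer: status word 0x0001 stored little-endian. */
	uint8_t intr_buffer[8] = { 0, 0, 0, 0, 0x01, 0x00, 0, 0 };
	uint16_t int_status = get_le16(intr_buffer + STATUS_OFFSET);

	if (int_status & INT_NEXT_BCN)
		printf("beacon interrupt: flush buffered multicast, reload beacon\n");
	else
		printf("unsupported interrupt 0x%04x\n", int_status);
	return 0;
}
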
*/ } -static void set_rts_cts_work(struct work_struct *work) +static void set_rts_cts(struct zd_mac *mac, unsigned int short_preamble) { - struct zd_mac *mac = - container_of(work, struct zd_mac, set_rts_cts_work); - unsigned long flags; - unsigned int short_preamble; - mutex_lock(&mac->chip.mutex); - - spin_lock_irqsave(&mac->lock, flags); - mac->updating_rts_rate = 0; - short_preamble = mac->short_preamble; - spin_unlock_irqrestore(&mac->lock, flags); - zd_chip_set_rts_cts_rate_locked(&mac->chip, short_preamble); mutex_unlock(&mac->chip.mutex); } @@ -1040,33 +1209,42 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw, u32 changes) { struct zd_mac *mac = zd_hw_mac(hw); - unsigned long flags; int associated; dev_dbg_f(zd_mac_dev(mac), "changes: %x\n", changes); if (mac->type == NL80211_IFTYPE_MESH_POINT || - mac->type == NL80211_IFTYPE_ADHOC) { + mac->type == NL80211_IFTYPE_ADHOC || + mac->type == NL80211_IFTYPE_AP) { associated = true; if (changes & BSS_CHANGED_BEACON) { struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); if (beacon) { + zd_chip_disable_hwint(&mac->chip); zd_mac_config_beacon(hw, beacon); + zd_chip_enable_hwint(&mac->chip); kfree_skb(beacon); } } if (changes & BSS_CHANGED_BEACON_ENABLED) { - u32 interval; + u16 interval = 0; + u8 period = 0; - if (bss_conf->enable_beacon) - interval = BCN_MODE_IBSS | - bss_conf->beacon_int; - else - interval = 0; + if (bss_conf->enable_beacon) { + period = bss_conf->dtim_period; + interval = bss_conf->beacon_int; + } - zd_set_beacon_interval(&mac->chip, interval); + spin_lock_irq(&mac->lock); + mac->beacon.period = period; + mac->beacon.interval = interval; + mac->beacon.last_update = jiffies; + spin_unlock_irq(&mac->lock); + + zd_set_beacon_interval(&mac->chip, interval, period, + mac->type); } } else associated = is_valid_ether_addr(bss_conf->bssid); @@ -1078,15 +1256,11 @@ static void zd_op_bss_info_changed(struct ieee80211_hw *hw, /* TODO: do hardware bssid filtering */ if (changes & BSS_CHANGED_ERP_PREAMBLE) { - spin_lock_irqsave(&mac->lock, flags); + spin_lock_irq(&mac->lock); mac->short_preamble = bss_conf->use_short_preamble; - if (!mac->updating_rts_rate) { - mac->updating_rts_rate = 1; - /* FIXME: should disable TX here, until work has - * completed and RTS_CTS reg is updated */ - queue_work(zd_workqueue, &mac->set_rts_cts_work); - } - spin_unlock_irqrestore(&mac->lock, flags); + spin_unlock_irq(&mac->lock); + + set_rts_cts(mac, bss_conf->use_short_preamble); } } @@ -1138,12 +1312,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf) hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band; hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | - IEEE80211_HW_SIGNAL_UNSPEC; + IEEE80211_HW_SIGNAL_UNSPEC | + IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_ADHOC); + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP); hw->max_signal = 100; hw->queues = 1; @@ -1160,15 +1336,82 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf) zd_chip_init(&mac->chip, hw, intf); housekeeping_init(mac); - INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler); - INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work); - INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler); + beacon_init(mac); INIT_WORK(&mac->process_intr, zd_process_intr); SET_IEEE80211_DEV(hw, &intf->dev); return hw; } +#define BEACON_WATCHDOG_DELAY round_jiffies_relative(HZ) + +static void beacon_watchdog_handler(struct 
work_struct *work) +{ + struct zd_mac *mac = + container_of(work, struct zd_mac, beacon.watchdog_work.work); + struct sk_buff *beacon; + unsigned long timeout; + int interval, period; + + if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags)) + goto rearm; + if (mac->type != NL80211_IFTYPE_AP || !mac->vif) + goto rearm; + + spin_lock_irq(&mac->lock); + interval = mac->beacon.interval; + period = mac->beacon.period; + timeout = mac->beacon.last_update + msecs_to_jiffies(interval) + HZ; + spin_unlock_irq(&mac->lock); + + if (interval > 0 && time_is_before_jiffies(timeout)) { + dev_dbg_f(zd_mac_dev(mac), "beacon interrupt stalled, " + "restarting. " + "(interval: %d, dtim: %d)\n", + interval, period); + + zd_chip_disable_hwint(&mac->chip); + + beacon = ieee80211_beacon_get(mac->hw, mac->vif); + if (beacon) { + zd_mac_config_beacon(mac->hw, beacon); + kfree_skb(beacon); + } + + zd_set_beacon_interval(&mac->chip, interval, period, mac->type); + + zd_chip_enable_hwint(&mac->chip); + + spin_lock_irq(&mac->lock); + mac->beacon.last_update = jiffies; + spin_unlock_irq(&mac->lock); + } + +rearm: + queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work, + BEACON_WATCHDOG_DELAY); +} + +static void beacon_init(struct zd_mac *mac) +{ + INIT_DELAYED_WORK(&mac->beacon.watchdog_work, beacon_watchdog_handler); +} + +static void beacon_enable(struct zd_mac *mac) +{ + dev_dbg_f(zd_mac_dev(mac), "\n"); + + mac->beacon.last_update = jiffies; + queue_delayed_work(zd_workqueue, &mac->beacon.watchdog_work, + BEACON_WATCHDOG_DELAY); +} + +static void beacon_disable(struct zd_mac *mac) +{ + dev_dbg_f(zd_mac_dev(mac), "\n"); + cancel_delayed_work_sync(&mac->beacon.watchdog_work); +} + #define LINK_LED_WORK_DELAY HZ static void link_led_handler(struct work_struct *work) @@ -1179,6 +1422,9 @@ static void link_led_handler(struct work_struct *work) int is_associated; int r; + if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags)) + goto requeue; + spin_lock_irq(&mac->lock); is_associated = mac->associated; spin_unlock_irq(&mac->lock); @@ -1188,6 +1434,7 @@ static void link_led_handler(struct work_struct *work) if (r) dev_dbg_f(zd_mac_dev(mac), "zd_chip_control_leds error %d\n", r); +requeue: queue_delayed_work(zd_workqueue, &mac->housekeeping.link_led_work, LINK_LED_WORK_DELAY); } diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index a6d86b996c79..f8c93c3fe755 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h @@ -163,6 +163,17 @@ struct housekeeping { struct delayed_work link_led_work; }; +struct beacon { + struct delayed_work watchdog_work; + unsigned long last_update; + u16 interval; + u8 period; +}; + +enum zd_device_flags { + ZD_DEVICE_RUNNING, +}; + #define ZD_MAC_STATS_BUFFER_SIZE 16 #define ZD_MAC_MAX_ACK_WAITERS 50 @@ -172,17 +183,19 @@ struct zd_mac { spinlock_t lock; spinlock_t intr_lock; struct ieee80211_hw *hw; + struct ieee80211_vif *vif; struct housekeeping housekeeping; - struct work_struct set_multicast_hash_work; + struct beacon beacon; struct work_struct set_rts_cts_work; - struct work_struct set_rx_filter_work; struct work_struct process_intr; struct zd_mc_hash multicast_hash; u8 intr_buffer[USB_MAX_EP_INT_BUFFER]; u8 regdomain; u8 default_regdomain; + u8 channel; int type; int associated; + unsigned long flags; struct sk_buff_head ack_wait_queue; struct ieee80211_channel channels[14]; struct ieee80211_rate rates[12]; @@ -191,9 +204,6 @@ struct zd_mac { /* Short preamble (used for RTS/CTS) */ unsigned int short_preamble:1; 
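The beacon watchdog above treats beaconing as stalled when no beacon-done interrupt has refreshed beacon.last_update for one full beacon interval plus a one-second grace period, and then reprograms the beacon template and interval. A minimal userspace model of just that timing check, using milliseconds instead of jiffies and ignoring counter wraparound (names and values here are illustrative only, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Model of the stall test in beacon_watchdog_handler(): stalled when the last
 * beacon-done update is older than one interval plus 1000 ms of grace. */
static bool beacon_stalled(unsigned long now_ms, unsigned long last_update_ms,
                           int interval_ms)
{
        unsigned long timeout_ms = last_update_ms + interval_ms + 1000;

        /* Rough analogue of time_is_before_jiffies(timeout). */
        return interval_ms > 0 && now_ms > timeout_ms;
}

int main(void)
{
        printf("%d\n", beacon_stalled(10000, 8000, 102)); /* 1: stalled, restart beaconing */
        printf("%d\n", beacon_stalled(10000, 9950, 102)); /* 0: still healthy */
        return 0;
}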
- /* flags to indicate update in progress */ - unsigned int updating_rts_rate:1; - /* whether to pass frames with CRC errors to stack */ unsigned int pass_failed_fcs:1; @@ -304,6 +314,10 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length); void zd_mac_tx_failed(struct urb *urb); void zd_mac_tx_to_dev(struct sk_buff *skb, int error); +int zd_op_start(struct ieee80211_hw *hw); +void zd_op_stop(struct ieee80211_hw *hw); +int zd_restore_settings(struct zd_mac *mac); + #ifdef DEBUG void zd_dump_rx_status(const struct rx_status *status); #else diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index 06041cb1c422..81e80489a052 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c @@ -377,8 +377,10 @@ static inline void handle_regs_int(struct urb *urb) int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2)); if (int_num == CR_INTERRUPT) { struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context)); + spin_lock(&mac->lock); memcpy(&mac->intr_buffer, urb->transfer_buffer, USB_MAX_EP_INT_BUFFER); + spin_unlock(&mac->lock); schedule_work(&mac->process_intr); } else if (intr->read_regs_enabled) { intr->read_regs.length = len = urb->actual_length; @@ -409,8 +411,10 @@ static void int_urb_complete(struct urb *urb) case -ENOENT: case -ECONNRESET: case -EPIPE: - goto kfree; + dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); + return; default: + dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); goto resubmit; } @@ -441,12 +445,11 @@ static void int_urb_complete(struct urb *urb) resubmit: r = usb_submit_urb(urb, GFP_ATOMIC); if (r) { - dev_dbg_f(urb_dev(urb), "resubmit urb %p\n", urb); - goto kfree; + dev_dbg_f(urb_dev(urb), "error: resubmit urb %p err code %d\n", + urb, r); + /* TODO: add worker to reset intr->urb */ } return; -kfree: - kfree(urb->transfer_buffer); } static inline int int_urb_interval(struct usb_device *udev) @@ -477,9 +480,8 @@ static inline int usb_int_enabled(struct zd_usb *usb) int zd_usb_enable_int(struct zd_usb *usb) { int r; - struct usb_device *udev; + struct usb_device *udev = zd_usb_to_usbdev(usb); struct zd_usb_interrupt *intr = &usb->intr; - void *transfer_buffer = NULL; struct urb *urb; dev_dbg_f(zd_usb_dev(usb), "\n"); @@ -500,20 +502,21 @@ int zd_usb_enable_int(struct zd_usb *usb) intr->urb = urb; spin_unlock_irq(&intr->lock); - /* TODO: make it a DMA buffer */ r = -ENOMEM; - transfer_buffer = kmalloc(USB_MAX_EP_INT_BUFFER, GFP_KERNEL); - if (!transfer_buffer) { + intr->buffer = usb_alloc_coherent(udev, USB_MAX_EP_INT_BUFFER, + GFP_KERNEL, &intr->buffer_dma); + if (!intr->buffer) { dev_dbg_f(zd_usb_dev(usb), "couldn't allocate transfer_buffer\n"); goto error_set_urb_null; } - udev = zd_usb_to_usbdev(usb); usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, EP_INT_IN), - transfer_buffer, USB_MAX_EP_INT_BUFFER, + intr->buffer, USB_MAX_EP_INT_BUFFER, int_urb_complete, usb, intr->interval); + urb->transfer_dma = intr->buffer_dma; + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; dev_dbg_f(zd_usb_dev(usb), "submit urb %p\n", intr->urb); r = usb_submit_urb(urb, GFP_KERNEL); @@ -525,7 +528,8 @@ int zd_usb_enable_int(struct zd_usb *usb) return 0; error: - kfree(transfer_buffer); + usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER, + intr->buffer, intr->buffer_dma); error_set_urb_null: spin_lock_irq(&intr->lock); intr->urb = NULL; @@ -539,8 +543,11 @@ out: void zd_usb_disable_int(struct zd_usb *usb) { unsigned long flags; + struct usb_device *udev = 
zd_usb_to_usbdev(usb); struct zd_usb_interrupt *intr = &usb->intr; struct urb *urb; + void *buffer; + dma_addr_t buffer_dma; spin_lock_irqsave(&intr->lock, flags); urb = intr->urb; @@ -549,11 +556,18 @@ void zd_usb_disable_int(struct zd_usb *usb) return; } intr->urb = NULL; + buffer = intr->buffer; + buffer_dma = intr->buffer_dma; + intr->buffer = NULL; spin_unlock_irqrestore(&intr->lock, flags); usb_kill_urb(urb); dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb); usb_free_urb(urb); + + if (buffer) + usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER, + buffer, buffer_dma); } static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, @@ -601,6 +615,7 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, static void rx_urb_complete(struct urb *urb) { + int r; struct zd_usb *usb; struct zd_usb_rx *rx; const u8 *buffer; @@ -615,6 +630,7 @@ static void rx_urb_complete(struct urb *urb) case -ENOENT: case -ECONNRESET: case -EPIPE: + dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); return; default: dev_dbg_f(urb_dev(urb), "urb %p error %d\n", urb, urb->status); @@ -626,6 +642,8 @@ static void rx_urb_complete(struct urb *urb) usb = urb->context; rx = &usb->rx; + zd_usb_reset_rx_idle_timer(usb); + if (length%rx->usb_packet_size > rx->usb_packet_size-4) { /* If there is an old first fragment, we don't care. */ dev_dbg_f(urb_dev(urb), "*** first fragment ***\n"); @@ -654,7 +672,9 @@ static void rx_urb_complete(struct urb *urb) } resubmit: - usb_submit_urb(urb, GFP_ATOMIC); + r = usb_submit_urb(urb, GFP_ATOMIC); + if (r) + dev_dbg_f(urb_dev(urb), "urb %p resubmit error %d\n", urb, r); } static struct urb *alloc_rx_urb(struct zd_usb *usb) @@ -690,7 +710,7 @@ static void free_rx_urb(struct urb *urb) usb_free_urb(urb); } -int zd_usb_enable_rx(struct zd_usb *usb) +static int __zd_usb_enable_rx(struct zd_usb *usb) { int i, r; struct zd_usb_rx *rx = &usb->rx; @@ -742,7 +762,21 @@ error: return r; } -void zd_usb_disable_rx(struct zd_usb *usb) +int zd_usb_enable_rx(struct zd_usb *usb) +{ + int r; + struct zd_usb_rx *rx = &usb->rx; + + mutex_lock(&rx->setup_mutex); + r = __zd_usb_enable_rx(usb); + mutex_unlock(&rx->setup_mutex); + + zd_usb_reset_rx_idle_timer(usb); + + return r; +} + +static void __zd_usb_disable_rx(struct zd_usb *usb) { int i; unsigned long flags; @@ -769,6 +803,40 @@ void zd_usb_disable_rx(struct zd_usb *usb) spin_unlock_irqrestore(&rx->lock, flags); } +void zd_usb_disable_rx(struct zd_usb *usb) +{ + struct zd_usb_rx *rx = &usb->rx; + + mutex_lock(&rx->setup_mutex); + __zd_usb_disable_rx(usb); + mutex_unlock(&rx->setup_mutex); + + cancel_delayed_work_sync(&rx->idle_work); +} + +static void zd_usb_reset_rx(struct zd_usb *usb) +{ + bool do_reset; + struct zd_usb_rx *rx = &usb->rx; + unsigned long flags; + + mutex_lock(&rx->setup_mutex); + + spin_lock_irqsave(&rx->lock, flags); + do_reset = rx->urbs != NULL; + spin_unlock_irqrestore(&rx->lock, flags); + + if (do_reset) { + __zd_usb_disable_rx(usb); + __zd_usb_enable_rx(usb); + } + + mutex_unlock(&rx->setup_mutex); + + if (do_reset) + zd_usb_reset_rx_idle_timer(usb); +} + /** * zd_usb_disable_tx - disable transmission * @usb: the zd1211rw-private USB structure @@ -779,19 +847,21 @@ void zd_usb_disable_tx(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; unsigned long flags; - struct list_head *pos, *n; + + atomic_set(&tx->enabled, 0); + + /* kill all submitted tx-urbs */ + usb_kill_anchored_urbs(&tx->submitted); spin_lock_irqsave(&tx->lock, flags); - list_for_each_safe(pos, n, &tx->free_urb_list) { - 
list_del(pos); - usb_free_urb(list_entry(pos, struct urb, urb_list)); - } - tx->enabled = 0; + WARN_ON(!skb_queue_empty(&tx->submitted_skbs)); + WARN_ON(tx->submitted_urbs != 0); tx->submitted_urbs = 0; + spin_unlock_irqrestore(&tx->lock, flags); + /* The stopped state is ignored, relying on ieee80211_wake_queues() * in a potentionally following zd_usb_enable_tx(). */ - spin_unlock_irqrestore(&tx->lock, flags); } /** @@ -807,63 +877,13 @@ void zd_usb_enable_tx(struct zd_usb *usb) struct zd_usb_tx *tx = &usb->tx; spin_lock_irqsave(&tx->lock, flags); - tx->enabled = 1; + atomic_set(&tx->enabled, 1); tx->submitted_urbs = 0; ieee80211_wake_queues(zd_usb_to_hw(usb)); tx->stopped = 0; spin_unlock_irqrestore(&tx->lock, flags); } -/** - * alloc_tx_urb - provides an tx URB - * @usb: a &struct zd_usb pointer - * - * Allocates a new URB. If possible takes the urb from the free list in - * usb->tx. - */ -static struct urb *alloc_tx_urb(struct zd_usb *usb) -{ - struct zd_usb_tx *tx = &usb->tx; - unsigned long flags; - struct list_head *entry; - struct urb *urb; - - spin_lock_irqsave(&tx->lock, flags); - if (list_empty(&tx->free_urb_list)) { - urb = usb_alloc_urb(0, GFP_ATOMIC); - goto out; - } - entry = tx->free_urb_list.next; - list_del(entry); - urb = list_entry(entry, struct urb, urb_list); -out: - spin_unlock_irqrestore(&tx->lock, flags); - return urb; -} - -/** - * free_tx_urb - frees a used tx URB - * @usb: a &struct zd_usb pointer - * @urb: URB to be freed - * - * Frees the transmission URB, which means to put it on the free URB - * list. - */ -static void free_tx_urb(struct zd_usb *usb, struct urb *urb) -{ - struct zd_usb_tx *tx = &usb->tx; - unsigned long flags; - - spin_lock_irqsave(&tx->lock, flags); - if (!tx->enabled) { - usb_free_urb(urb); - goto out; - } - list_add(&urb->urb_list, &tx->free_urb_list); -out: - spin_unlock_irqrestore(&tx->lock, flags); -} - static void tx_dec_submitted_urbs(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; @@ -905,6 +925,16 @@ static void tx_urb_complete(struct urb *urb) struct sk_buff *skb; struct ieee80211_tx_info *info; struct zd_usb *usb; + struct zd_usb_tx *tx; + + skb = (struct sk_buff *)urb->context; + info = IEEE80211_SKB_CB(skb); + /* + * grab 'usb' pointer before handing off the skb (since + * it might be freed by zd_mac_tx_to_dev or mac80211) + */ + usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb; + tx = &usb->tx; switch (urb->status) { case 0: @@ -922,20 +952,16 @@ static void tx_urb_complete(struct urb *urb) goto resubmit; } free_urb: - skb = (struct sk_buff *)urb->context; - /* - * grab 'usb' pointer before handing off the skb (since - * it might be freed by zd_mac_tx_to_dev or mac80211) - */ - info = IEEE80211_SKB_CB(skb); - usb = &zd_hw_mac(info->rate_driver_data[0])->chip.usb; + skb_unlink(skb, &usb->tx.submitted_skbs); zd_mac_tx_to_dev(skb, urb->status); - free_tx_urb(usb, urb); + usb_free_urb(urb); tx_dec_submitted_urbs(usb); return; resubmit: + usb_anchor_urb(urb, &tx->submitted); r = usb_submit_urb(urb, GFP_ATOMIC); if (r) { + usb_unanchor_urb(urb); dev_dbg_f(urb_dev(urb), "error resubmit urb %p %d\n", urb, r); goto free_urb; } @@ -956,10 +982,17 @@ resubmit: int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb) { int r; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct usb_device *udev = zd_usb_to_usbdev(usb); struct urb *urb; + struct zd_usb_tx *tx = &usb->tx; + + if (!atomic_read(&tx->enabled)) { + r = -ENOENT; + goto out; + } - urb = alloc_tx_urb(usb); + urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { r = 
-ENOMEM; goto out; @@ -968,17 +1001,118 @@ int zd_usb_tx(struct zd_usb *usb, struct sk_buff *skb) usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), skb->data, skb->len, tx_urb_complete, skb); + info->rate_driver_data[1] = (void *)jiffies; + skb_queue_tail(&tx->submitted_skbs, skb); + usb_anchor_urb(urb, &tx->submitted); + r = usb_submit_urb(urb, GFP_ATOMIC); - if (r) + if (r) { + dev_dbg_f(zd_usb_dev(usb), "error submit urb %p %d\n", urb, r); + usb_unanchor_urb(urb); + skb_unlink(skb, &tx->submitted_skbs); goto error; + } tx_inc_submitted_urbs(usb); return 0; error: - free_tx_urb(usb, urb); + usb_free_urb(urb); out: return r; } +static bool zd_tx_timeout(struct zd_usb *usb) +{ + struct zd_usb_tx *tx = &usb->tx; + struct sk_buff_head *q = &tx->submitted_skbs; + struct sk_buff *skb, *skbnext; + struct ieee80211_tx_info *info; + unsigned long flags, trans_start; + bool have_timedout = false; + + spin_lock_irqsave(&q->lock, flags); + skb_queue_walk_safe(q, skb, skbnext) { + info = IEEE80211_SKB_CB(skb); + trans_start = (unsigned long)info->rate_driver_data[1]; + + if (time_is_before_jiffies(trans_start + ZD_TX_TIMEOUT)) { + have_timedout = true; + break; + } + } + spin_unlock_irqrestore(&q->lock, flags); + + return have_timedout; +} + +static void zd_tx_watchdog_handler(struct work_struct *work) +{ + struct zd_usb *usb = + container_of(work, struct zd_usb, tx.watchdog_work.work); + struct zd_usb_tx *tx = &usb->tx; + + if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled) + goto out; + if (!zd_tx_timeout(usb)) + goto out; + + /* TX halted, try reset */ + dev_warn(zd_usb_dev(usb), "TX-stall detected, reseting device..."); + + usb_queue_reset_device(usb->intf); + + /* reset will stop this worker, don't rearm */ + return; +out: + queue_delayed_work(zd_workqueue, &tx->watchdog_work, + ZD_TX_WATCHDOG_INTERVAL); +} + +void zd_tx_watchdog_enable(struct zd_usb *usb) +{ + struct zd_usb_tx *tx = &usb->tx; + + if (!tx->watchdog_enabled) { + dev_dbg_f(zd_usb_dev(usb), "\n"); + queue_delayed_work(zd_workqueue, &tx->watchdog_work, + ZD_TX_WATCHDOG_INTERVAL); + tx->watchdog_enabled = 1; + } +} + +void zd_tx_watchdog_disable(struct zd_usb *usb) +{ + struct zd_usb_tx *tx = &usb->tx; + + if (tx->watchdog_enabled) { + dev_dbg_f(zd_usb_dev(usb), "\n"); + tx->watchdog_enabled = 0; + cancel_delayed_work_sync(&tx->watchdog_work); + } +} + +static void zd_rx_idle_timer_handler(struct work_struct *work) +{ + struct zd_usb *usb = + container_of(work, struct zd_usb, rx.idle_work.work); + struct zd_mac *mac = zd_usb_to_mac(usb); + + if (!test_bit(ZD_DEVICE_RUNNING, &mac->flags)) + return; + + dev_dbg_f(zd_usb_dev(usb), "\n"); + + /* 30 seconds since last rx, reset rx */ + zd_usb_reset_rx(usb); +} + +void zd_usb_reset_rx_idle_timer(struct zd_usb *usb) +{ + struct zd_usb_rx *rx = &usb->rx; + + cancel_delayed_work(&rx->idle_work); + queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL); +} + static inline void init_usb_interrupt(struct zd_usb *usb) { struct zd_usb_interrupt *intr = &usb->intr; @@ -993,22 +1127,27 @@ static inline void init_usb_rx(struct zd_usb *usb) { struct zd_usb_rx *rx = &usb->rx; spin_lock_init(&rx->lock); + mutex_init(&rx->setup_mutex); if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) { rx->usb_packet_size = 512; } else { rx->usb_packet_size = 64; } ZD_ASSERT(rx->fragment_length == 0); + INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler); } static inline void init_usb_tx(struct zd_usb *usb) { struct zd_usb_tx *tx = &usb->tx; 
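The TX rework above drops the recycled free-URB list in favour of a usb_anchor plus an skb queue: every submitted URB is anchored, a failed submission is unanchored again, and zd_usb_disable_tx() can cancel everything still in flight with usb_kill_anchored_urbs(). A condensed sketch of that pattern, using only USB core calls and hypothetical my_* names (not functions from this driver):

#include <linux/usb.h>

struct my_tx {                          /* hypothetical per-device TX state */
        struct usb_anchor submitted;    /* all in-flight URBs hang off this */
};

static void my_tx_init(struct my_tx *tx)
{
        init_usb_anchor(&tx->submitted);
}

static int my_tx_submit(struct my_tx *tx, struct urb *urb)
{
        int r;

        usb_anchor_urb(urb, &tx->submitted);    /* track before submitting */
        r = usb_submit_urb(urb, GFP_ATOMIC);
        if (r)
                usb_unanchor_urb(urb);          /* drop tracking on failure */
        return r;
}

static void my_tx_shutdown(struct my_tx *tx)
{
        /* Cancels every anchored URB; their completion handlers still run. */
        usb_kill_anchored_urbs(&tx->submitted);
}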
spin_lock_init(&tx->lock); - tx->enabled = 0; + atomic_set(&tx->enabled, 0); tx->stopped = 0; - INIT_LIST_HEAD(&tx->free_urb_list); + skb_queue_head_init(&tx->submitted_skbs); + init_usb_anchor(&tx->submitted); tx->submitted_urbs = 0; + tx->watchdog_enabled = 0; + INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler); } void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw, @@ -1017,6 +1156,7 @@ void zd_usb_init(struct zd_usb *usb, struct ieee80211_hw *hw, memset(usb, 0, sizeof(*usb)); usb->intf = usb_get_intf(intf); usb_set_intfdata(usb->intf, hw); + init_usb_anchor(&usb->submitted_cmds); init_usb_interrupt(usb); init_usb_tx(usb); init_usb_rx(usb); @@ -1240,6 +1380,7 @@ static void disconnect(struct usb_interface *intf) ieee80211_unregister_hw(hw); /* Just in case something has gone wrong! */ + zd_usb_disable_tx(usb); zd_usb_disable_rx(usb); zd_usb_disable_int(usb); @@ -1255,11 +1396,92 @@ static void disconnect(struct usb_interface *intf) dev_dbg(&intf->dev, "disconnected\n"); } +static void zd_usb_resume(struct zd_usb *usb) +{ + struct zd_mac *mac = zd_usb_to_mac(usb); + int r; + + dev_dbg_f(zd_usb_dev(usb), "\n"); + + r = zd_op_start(zd_usb_to_hw(usb)); + if (r < 0) { + dev_warn(zd_usb_dev(usb), "Device resume failed " + "with error code %d. Retrying...\n", r); + if (usb->was_running) + set_bit(ZD_DEVICE_RUNNING, &mac->flags); + usb_queue_reset_device(usb->intf); + return; + } + + if (mac->type != NL80211_IFTYPE_UNSPECIFIED) { + r = zd_restore_settings(mac); + if (r < 0) { + dev_dbg(zd_usb_dev(usb), + "failed to restore settings, %d\n", r); + return; + } + } +} + +static void zd_usb_stop(struct zd_usb *usb) +{ + dev_dbg_f(zd_usb_dev(usb), "\n"); + + zd_op_stop(zd_usb_to_hw(usb)); + + zd_usb_disable_tx(usb); + zd_usb_disable_rx(usb); + zd_usb_disable_int(usb); + + usb->initialized = 0; +} + +static int pre_reset(struct usb_interface *intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(intf); + struct zd_mac *mac; + struct zd_usb *usb; + + if (!hw || intf->condition != USB_INTERFACE_BOUND) + return 0; + + mac = zd_hw_mac(hw); + usb = &mac->chip.usb; + + usb->was_running = test_bit(ZD_DEVICE_RUNNING, &mac->flags); + + zd_usb_stop(usb); + + mutex_lock(&mac->chip.mutex); + return 0; +} + +static int post_reset(struct usb_interface *intf) +{ + struct ieee80211_hw *hw = usb_get_intfdata(intf); + struct zd_mac *mac; + struct zd_usb *usb; + + if (!hw || intf->condition != USB_INTERFACE_BOUND) + return 0; + + mac = zd_hw_mac(hw); + usb = &mac->chip.usb; + + mutex_unlock(&mac->chip.mutex); + + if (usb->was_running) + zd_usb_resume(usb); + return 0; +} + static struct usb_driver driver = { .name = KBUILD_MODNAME, .id_table = usb_ids, .probe = probe, .disconnect = disconnect, + .pre_reset = pre_reset, + .post_reset = post_reset, }; struct workqueue_struct *zd_workqueue; @@ -1393,30 +1615,35 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, return -EWOULDBLOCK; } if (!usb_int_enabled(usb)) { - dev_dbg_f(zd_usb_dev(usb), + dev_dbg_f(zd_usb_dev(usb), "error: usb interrupt not enabled\n"); return -EWOULDBLOCK; } + ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); + BUILD_BUG_ON(sizeof(struct usb_req_read_regs) + USB_MAX_IOREAD16_COUNT * + sizeof(__le16) > sizeof(usb->req_buf)); + BUG_ON(sizeof(struct usb_req_read_regs) + count * sizeof(__le16) > + sizeof(usb->req_buf)); + req_len = sizeof(struct usb_req_read_regs) + count * sizeof(__le16); - req = kmalloc(req_len, GFP_KERNEL); - if (!req) - return -ENOMEM; + req = (void *)usb->req_buf; + req->id = 
cpu_to_le16(USB_REQ_READ_REGS); for (i = 0; i < count; i++) req->addr[i] = cpu_to_le16((u16)addresses[i]); udev = zd_usb_to_usbdev(usb); prepare_read_regs_int(usb); - r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), - req, req_len, &actual_req_len, 1000 /* ms */); + r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT), + req, req_len, &actual_req_len, 50 /* ms */); if (r) { dev_dbg_f(zd_usb_dev(usb), - "error in usb_bulk_msg(). Error number %d\n", r); + "error in usb_interrupt_msg(). Error number %d\n", r); goto error; } if (req_len != actual_req_len) { - dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()\n" + dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n" " req_len %d != actual_req_len %d\n", req_len, actual_req_len); r = -EIO; @@ -1424,7 +1651,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, } timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion, - msecs_to_jiffies(1000)); + msecs_to_jiffies(50)); if (!timeout) { disable_read_regs_int(usb); dev_dbg_f(zd_usb_dev(usb), "read timed out\n"); @@ -1434,17 +1661,106 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, r = get_results(usb, values, req, count); error: - kfree(req); return r; } -int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, - unsigned int count) +static void iowrite16v_urb_complete(struct urb *urb) +{ + struct zd_usb *usb = urb->context; + + if (urb->status && !usb->cmd_error) + usb->cmd_error = urb->status; +} + +static int zd_submit_waiting_urb(struct zd_usb *usb, bool last) +{ + int r = 0; + struct urb *urb = usb->urb_async_waiting; + + if (!urb) + return 0; + + usb->urb_async_waiting = NULL; + + if (!last) + urb->transfer_flags |= URB_NO_INTERRUPT; + + usb_anchor_urb(urb, &usb->submitted_cmds); + r = usb_submit_urb(urb, GFP_KERNEL); + if (r) { + usb_unanchor_urb(urb); + dev_dbg_f(zd_usb_dev(usb), + "error in usb_submit_urb(). Error number %d\n", r); + goto error; + } + + /* fall-through with r == 0 */ +error: + usb_free_urb(urb); + return r; +} + +void zd_usb_iowrite16v_async_start(struct zd_usb *usb) +{ + ZD_ASSERT(usb_anchor_empty(&usb->submitted_cmds)); + ZD_ASSERT(usb->urb_async_waiting == NULL); + ZD_ASSERT(!usb->in_async); + + ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); + + usb->in_async = 1; + usb->cmd_error = 0; + usb->urb_async_waiting = NULL; +} + +int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout) +{ + int r; + + ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); + ZD_ASSERT(usb->in_async); + + /* Submit last iowrite16v URB */ + r = zd_submit_waiting_urb(usb, true); + if (r) { + dev_dbg_f(zd_usb_dev(usb), + "error in zd_submit_waiting_usb(). 
" + "Error number %d\n", r); + + usb_kill_anchored_urbs(&usb->submitted_cmds); + goto error; + } + + if (timeout) + timeout = usb_wait_anchor_empty_timeout(&usb->submitted_cmds, + timeout); + if (!timeout) { + usb_kill_anchored_urbs(&usb->submitted_cmds); + if (usb->cmd_error == -ENOENT) { + dev_dbg_f(zd_usb_dev(usb), "timed out"); + r = -ETIMEDOUT; + goto error; + } + } + + r = usb->cmd_error; +error: + usb->in_async = 0; + return r; +} + +int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, + unsigned int count) { int r; struct usb_device *udev; struct usb_req_write_regs *req = NULL; - int i, req_len, actual_req_len; + int i, req_len; + struct urb *urb; + struct usb_host_endpoint *ep; + + ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); + ZD_ASSERT(usb->in_async); if (count == 0) return 0; @@ -1460,11 +1776,23 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, return -EWOULDBLOCK; } + udev = zd_usb_to_usbdev(usb); + + ep = usb_pipe_endpoint(udev, usb_sndintpipe(udev, EP_REGS_OUT)); + if (!ep) + return -ENOENT; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + req_len = sizeof(struct usb_req_write_regs) + count * sizeof(struct reg_data); req = kmalloc(req_len, GFP_KERNEL); - if (!req) - return -ENOMEM; + if (!req) { + r = -ENOMEM; + goto error; + } req->id = cpu_to_le16(USB_REQ_WRITE_REGS); for (i = 0; i < count; i++) { @@ -1473,29 +1801,44 @@ int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, rw->value = cpu_to_le16(ioreqs[i].value); } - udev = zd_usb_to_usbdev(usb); - r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), - req, req_len, &actual_req_len, 1000 /* ms */); + usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT), + req, req_len, iowrite16v_urb_complete, usb, + ep->desc.bInterval); + urb->transfer_flags |= URB_FREE_BUFFER | URB_SHORT_NOT_OK; + + /* Submit previous URB */ + r = zd_submit_waiting_urb(usb, false); if (r) { dev_dbg_f(zd_usb_dev(usb), - "error in usb_bulk_msg(). Error number %d\n", r); - goto error; - } - if (req_len != actual_req_len) { - dev_dbg_f(zd_usb_dev(usb), - "error in usb_bulk_msg()" - " req_len %d != actual_req_len %d\n", - req_len, actual_req_len); - r = -EIO; + "error in zd_submit_waiting_usb(). " + "Error number %d\n", r); goto error; } - /* FALL-THROUGH with r == 0 */ + /* Delay submit so that URB_NO_INTERRUPT flag can be set for all URBs + * of currect batch except for very last. 
+ */ + usb->urb_async_waiting = urb; + return 0; error: - kfree(req); + usb_free_urb(urb); return r; } +int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, + unsigned int count) +{ + int r; + + zd_usb_iowrite16v_async_start(usb); + r = zd_usb_iowrite16v_async(usb, ioreqs, count); + if (r) { + zd_usb_iowrite16v_async_end(usb, 0); + return r; + } + return zd_usb_iowrite16v_async_end(usb, 50 /* ms */); +} + int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) { int r; @@ -1537,14 +1880,19 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) if (r) { dev_dbg_f(zd_usb_dev(usb), "error %d: Couldn't read CR203\n", r); - goto out; + return r; } bit_value_template &= ~(RF_IF_LE|RF_CLK|RF_DATA); + ZD_ASSERT(mutex_is_locked(&zd_usb_to_chip(usb)->mutex)); + BUILD_BUG_ON(sizeof(struct usb_req_rfwrite) + + USB_MAX_RFWRITE_BIT_COUNT * sizeof(__le16) > + sizeof(usb->req_buf)); + BUG_ON(sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16) > + sizeof(usb->req_buf)); + req_len = sizeof(struct usb_req_rfwrite) + bits * sizeof(__le16); - req = kmalloc(req_len, GFP_KERNEL); - if (!req) - return -ENOMEM; + req = (void *)usb->req_buf; req->id = cpu_to_le16(USB_REQ_WRITE_RF); /* 1: 3683a, but not used in ZYDAS driver */ @@ -1559,15 +1907,15 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) } udev = zd_usb_to_usbdev(usb); - r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_REGS_OUT), - req, req_len, &actual_req_len, 1000 /* ms */); + r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT), + req, req_len, &actual_req_len, 50 /* ms */); if (r) { dev_dbg_f(zd_usb_dev(usb), - "error in usb_bulk_msg(). Error number %d\n", r); + "error in usb_interrupt_msg(). Error number %d\n", r); goto out; } if (req_len != actual_req_len) { - dev_dbg_f(zd_usb_dev(usb), "error in usb_bulk_msg()" + dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()" " req_len %d != actual_req_len %d\n", req_len, actual_req_len); r = -EIO; @@ -1576,6 +1924,5 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits) /* FALL-THROUGH with r == 0 */ out: - kfree(req); return r; } diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h index 1b1655cb7cb4..b3df2c8116cc 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.h +++ b/drivers/net/wireless/zd1211rw/zd_usb.h @@ -32,6 +32,10 @@ #define ZD_USB_TX_HIGH 5 #define ZD_USB_TX_LOW 2 +#define ZD_TX_TIMEOUT (HZ * 5) +#define ZD_TX_WATCHDOG_INTERVAL round_jiffies_relative(HZ) +#define ZD_RX_IDLE_INTERVAL round_jiffies_relative(30 * HZ) + enum devicetype { DEVICE_ZD1211 = 0, DEVICE_ZD1211B = 1, @@ -162,6 +166,8 @@ struct zd_usb_interrupt { struct read_regs_int read_regs; spinlock_t lock; struct urb *urb; + void *buffer; + dma_addr_t buffer_dma; int interval; u8 read_regs_enabled:1; }; @@ -175,7 +181,9 @@ static inline struct usb_int_regs *get_read_regs(struct zd_usb_interrupt *intr) struct zd_usb_rx { spinlock_t lock; - u8 fragment[2*USB_MAX_RX_SIZE]; + struct mutex setup_mutex; + struct delayed_work idle_work; + u8 fragment[2 * USB_MAX_RX_SIZE]; unsigned int fragment_length; unsigned int usb_packet_size; struct urb **urbs; @@ -184,19 +192,21 @@ struct zd_usb_rx { /** * struct zd_usb_tx - structure used for transmitting frames + * @enabled: atomic enabled flag, indicates whether tx is enabled * @lock: lock for transmission - * @free_urb_list: list of free URBs, contains all the URBs, which can be used + * @submitted: anchor for URBs sent to device * @submitted_urbs: atomic integer that counts the URBs having sent 
to the * device, which haven't been completed - * @enabled: enabled flag, indicates whether tx is enabled * @stopped: indicates whether higher level tx queues are stopped */ struct zd_usb_tx { + atomic_t enabled; spinlock_t lock; - struct list_head free_urb_list; + struct delayed_work watchdog_work; + struct sk_buff_head submitted_skbs; + struct usb_anchor submitted; int submitted_urbs; - int enabled; - int stopped; + u8 stopped:1, watchdog_enabled:1; }; /* Contains the usb parts. The structure doesn't require a lock because intf @@ -207,7 +217,11 @@ struct zd_usb { struct zd_usb_rx rx; struct zd_usb_tx tx; struct usb_interface *intf; - u8 is_zd1211b:1, initialized:1; + struct usb_anchor submitted_cmds; + struct urb *urb_async_waiting; + int cmd_error; + u8 req_buf[64]; /* zd_usb_iowrite16v needs 62 bytes */ + u8 is_zd1211b:1, initialized:1, was_running:1, in_async:1; }; #define zd_usb_dev(usb) (&usb->intf->dev) @@ -234,12 +248,17 @@ void zd_usb_clear(struct zd_usb *usb); int zd_usb_scnprint_id(struct zd_usb *usb, char *buffer, size_t size); +void zd_tx_watchdog_enable(struct zd_usb *usb); +void zd_tx_watchdog_disable(struct zd_usb *usb); + int zd_usb_enable_int(struct zd_usb *usb); void zd_usb_disable_int(struct zd_usb *usb); int zd_usb_enable_rx(struct zd_usb *usb); void zd_usb_disable_rx(struct zd_usb *usb); +void zd_usb_reset_rx_idle_timer(struct zd_usb *usb); + void zd_usb_enable_tx(struct zd_usb *usb); void zd_usb_disable_tx(struct zd_usb *usb); @@ -254,6 +273,10 @@ static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value, return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1); } +void zd_usb_iowrite16v_async_start(struct zd_usb *usb); +int zd_usb_iowrite16v_async_end(struct zd_usb *usb, unsigned int timeout); +int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, + unsigned int count); int zd_usb_iowrite16v(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs, unsigned int count); diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile new file mode 100644 index 000000000000..e346e8125ef5 --- /dev/null +++ b/drivers/net/xen-netback/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o + +xen-netback-y := netback.o xenbus.o interface.o diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h new file mode 100644 index 000000000000..5d7bbf2b2ee7 --- /dev/null +++ b/drivers/net/xen-netback/common.h @@ -0,0 +1,161 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef __XEN_NETBACK__COMMON_H__ +#define __XEN_NETBACK__COMMON_H__ + +#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/wait.h> +#include <linux/sched.h> + +#include <xen/interface/io/netif.h> +#include <xen/interface/grant_table.h> +#include <xen/grant_table.h> +#include <xen/xenbus.h> + +struct xen_netbk; + +struct xenvif { + /* Unique identifier for this interface. */ + domid_t domid; + unsigned int handle; + + /* Reference to netback processing backend. */ + struct xen_netbk *netbk; + + u8 fe_dev_addr[6]; + + /* Physical parameters of the comms window. */ + grant_handle_t tx_shmem_handle; + grant_ref_t tx_shmem_ref; + grant_handle_t rx_shmem_handle; + grant_ref_t rx_shmem_ref; + unsigned int irq; + + /* List of frontends to notify after a batch of frames sent. */ + struct list_head notify_list; + + /* The shared rings and indexes. */ + struct xen_netif_tx_back_ring tx; + struct xen_netif_rx_back_ring rx; + struct vm_struct *tx_comms_area; + struct vm_struct *rx_comms_area; + + /* Flags that must not be set in dev->features */ + u32 features_disabled; + + /* Frontend feature information. */ + u8 can_sg:1; + u8 gso:1; + u8 gso_prefix:1; + u8 csum:1; + + /* Internal feature information. */ + u8 can_queue:1; /* can queue packets for receiver? */ + + /* + * Allow xenvif_start_xmit() to peek ahead in the rx request + * ring. This is a prediction of what rx_req_cons will be + * once all queued skbs are put on the ring. + */ + RING_IDX rx_req_cons_peek; + + /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */ + unsigned long credit_bytes; + unsigned long credit_usec; + unsigned long remaining_credit; + struct timer_list credit_timeout; + + /* Statistics */ + unsigned long rx_gso_checksum_fixup; + + /* Miscellaneous private stuff. */ + struct list_head schedule_list; + atomic_t refcnt; + struct net_device *dev; + + wait_queue_head_t waiting_to_free; +}; + +#define XEN_NETIF_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) +#define XEN_NETIF_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) + +struct xenvif *xenvif_alloc(struct device *parent, + domid_t domid, + unsigned int handle); + +int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, + unsigned long rx_ring_ref, unsigned int evtchn); +void xenvif_disconnect(struct xenvif *vif); + +void xenvif_get(struct xenvif *vif); +void xenvif_put(struct xenvif *vif); + +int xenvif_xenbus_init(void); + +int xenvif_schedulable(struct xenvif *vif); + +int xen_netbk_rx_ring_full(struct xenvif *vif); + +int xen_netbk_must_stop_queue(struct xenvif *vif); + +/* (Un)Map communication rings. 
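The xenvif structure above only declares the transmit-shaping policy: a vif is allowed credit_bytes of transmit budget every credit_usec, tracked in remaining_credit, with credit_timeout used to wake the vif once budget returns. The enforcement code sits further down in netback.c and is not part of this hunk, so the following is only a rough standalone model of such a replenish-and-charge scheme, with made-up numbers and simplified logic:

#include <stdbool.h>
#include <stdio.h>

/* Rough model only: refill the budget once per window, charge each packet
 * against it, defer packets that no longer fit. Field names echo struct
 * xenvif, but this is illustrative, not the driver's exact algorithm. */
struct shaper {
        unsigned long credit_bytes;      /* budget per window */
        unsigned long credit_usec;       /* window length */
        unsigned long remaining_credit;  /* budget left in this window */
        unsigned long window_start_usec; /* when the current window began */
};

static bool may_send(struct shaper *s, unsigned long now_usec,
                     unsigned long pkt_len)
{
        if (now_usec - s->window_start_usec >= s->credit_usec) {
                s->window_start_usec = now_usec;     /* new window: refill */
                s->remaining_credit = s->credit_bytes;
        }
        if (pkt_len > s->remaining_credit)
                return false;                        /* wait for next window */
        s->remaining_credit -= pkt_len;
        return true;
}

int main(void)
{
        struct shaper s = { .credit_bytes = 3000, .credit_usec = 1000,
                            .remaining_credit = 3000, .window_start_usec = 0 };

        printf("%d\n", may_send(&s, 100, 1500));  /* 1: fits in budget */
        printf("%d\n", may_send(&s, 200, 1500));  /* 1: budget now exhausted */
        printf("%d\n", may_send(&s, 300, 1500));  /* 0: deferred */
        printf("%d\n", may_send(&s, 1200, 1500)); /* 1: new window, refilled */
        return 0;
}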
*/ +void xen_netbk_unmap_frontend_rings(struct xenvif *vif); +int xen_netbk_map_frontend_rings(struct xenvif *vif, + grant_ref_t tx_ring_ref, + grant_ref_t rx_ring_ref); + +/* (De)Register a xenvif with the netback backend. */ +void xen_netbk_add_xenvif(struct xenvif *vif); +void xen_netbk_remove_xenvif(struct xenvif *vif); + +/* (De)Schedule backend processing for a xenvif */ +void xen_netbk_schedule_xenvif(struct xenvif *vif); +void xen_netbk_deschedule_xenvif(struct xenvif *vif); + +/* Check for SKBs from frontend and schedule backend processing */ +void xen_netbk_check_rx_xenvif(struct xenvif *vif); +/* Receive an SKB from the frontend */ +void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb); + +/* Queue an SKB for transmission to the frontend */ +void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb); +/* Notify xenvif that ring now has space to send an skb to the frontend */ +void xenvif_notify_tx_completion(struct xenvif *vif); + +/* Returns number of ring slots required to send an skb to the frontend */ +unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb); + +#endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c new file mode 100644 index 000000000000..de569cc19da4 --- /dev/null +++ b/drivers/net/xen-netback/interface.c @@ -0,0 +1,424 @@ +/* + * Network-device interface management. + * + * Copyright (c) 2004-2005, Keir Fraser + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include "common.h" + +#include <linux/ethtool.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> + +#include <xen/events.h> +#include <asm/xen/hypercall.h> + +#define XENVIF_QUEUE_LENGTH 32 + +void xenvif_get(struct xenvif *vif) +{ + atomic_inc(&vif->refcnt); +} + +void xenvif_put(struct xenvif *vif) +{ + if (atomic_dec_and_test(&vif->refcnt)) + wake_up(&vif->waiting_to_free); +} + +int xenvif_schedulable(struct xenvif *vif) +{ + return netif_running(vif->dev) && netif_carrier_ok(vif->dev); +} + +static int xenvif_rx_schedulable(struct xenvif *vif) +{ + return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif); +} + +static irqreturn_t xenvif_interrupt(int irq, void *dev_id) +{ + struct xenvif *vif = dev_id; + + if (vif->netbk == NULL) + return IRQ_NONE; + + xen_netbk_schedule_xenvif(vif); + + if (xenvif_rx_schedulable(vif)) + netif_wake_queue(vif->dev); + + return IRQ_HANDLED; +} + +static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct xenvif *vif = netdev_priv(dev); + + BUG_ON(skb->dev != dev); + + if (vif->netbk == NULL) + goto drop; + + /* Drop the packet if the target domain has no receive buffers. */ + if (!xenvif_rx_schedulable(vif)) + goto drop; + + /* Reserve ring slots for the worst-case number of fragments. */ + vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb); + xenvif_get(vif); + + if (vif->can_queue && xen_netbk_must_stop_queue(vif)) + netif_stop_queue(dev); + + xen_netbk_queue_tx_skb(vif, skb); + + return NETDEV_TX_OK; + + drop: + vif->dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb) +{ + netif_rx_ni(skb); +} + +void xenvif_notify_tx_completion(struct xenvif *vif) +{ + if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif)) + netif_wake_queue(vif->dev); +} + +static struct net_device_stats *xenvif_get_stats(struct net_device *dev) +{ + struct xenvif *vif = netdev_priv(dev); + return &vif->dev->stats; +} + +static void xenvif_up(struct xenvif *vif) +{ + xen_netbk_add_xenvif(vif); + enable_irq(vif->irq); + xen_netbk_check_rx_xenvif(vif); +} + +static void xenvif_down(struct xenvif *vif) +{ + disable_irq(vif->irq); + xen_netbk_deschedule_xenvif(vif); + xen_netbk_remove_xenvif(vif); +} + +static int xenvif_open(struct net_device *dev) +{ + struct xenvif *vif = netdev_priv(dev); + if (netif_carrier_ok(dev)) + xenvif_up(vif); + netif_start_queue(dev); + return 0; +} + +static int xenvif_close(struct net_device *dev) +{ + struct xenvif *vif = netdev_priv(dev); + if (netif_carrier_ok(dev)) + xenvif_down(vif); + netif_stop_queue(dev); + return 0; +} + +static int xenvif_change_mtu(struct net_device *dev, int mtu) +{ + struct xenvif *vif = netdev_priv(dev); + int max = vif->can_sg ? 
65535 - VLAN_ETH_HLEN : ETH_DATA_LEN; + + if (mtu > max) + return -EINVAL; + dev->mtu = mtu; + return 0; +} + +static void xenvif_set_features(struct xenvif *vif) +{ + struct net_device *dev = vif->dev; + u32 features = dev->features; + + if (vif->can_sg) + features |= NETIF_F_SG; + if (vif->gso || vif->gso_prefix) + features |= NETIF_F_TSO; + if (vif->csum) + features |= NETIF_F_IP_CSUM; + + features &= ~(vif->features_disabled); + + if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) + dev->mtu = ETH_DATA_LEN; + + dev->features = features; +} + +static int xenvif_set_tx_csum(struct net_device *dev, u32 data) +{ + struct xenvif *vif = netdev_priv(dev); + if (data) { + if (!vif->csum) + return -EOPNOTSUPP; + vif->features_disabled &= ~NETIF_F_IP_CSUM; + } else { + vif->features_disabled |= NETIF_F_IP_CSUM; + } + + xenvif_set_features(vif); + return 0; +} + +static int xenvif_set_sg(struct net_device *dev, u32 data) +{ + struct xenvif *vif = netdev_priv(dev); + if (data) { + if (!vif->can_sg) + return -EOPNOTSUPP; + vif->features_disabled &= ~NETIF_F_SG; + } else { + vif->features_disabled |= NETIF_F_SG; + } + + xenvif_set_features(vif); + return 0; +} + +static int xenvif_set_tso(struct net_device *dev, u32 data) +{ + struct xenvif *vif = netdev_priv(dev); + if (data) { + if (!vif->gso && !vif->gso_prefix) + return -EOPNOTSUPP; + vif->features_disabled &= ~NETIF_F_TSO; + } else { + vif->features_disabled |= NETIF_F_TSO; + } + + xenvif_set_features(vif); + return 0; +} + +static const struct xenvif_stat { + char name[ETH_GSTRING_LEN]; + u16 offset; +} xenvif_stats[] = { + { + "rx_gso_checksum_fixup", + offsetof(struct xenvif, rx_gso_checksum_fixup) + }, +}; + +static int xenvif_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return ARRAY_SIZE(xenvif_stats); + default: + return -EINVAL; + } +} + +static void xenvif_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 * data) +{ + void *vif = netdev_priv(dev); + int i; + + for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) + data[i] = *(unsigned long *)(vif + xenvif_stats[i].offset); +} + +static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + xenvif_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static struct ethtool_ops xenvif_ethtool_ops = { + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = xenvif_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = xenvif_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = xenvif_set_tso, + .get_link = ethtool_op_get_link, + + .get_sset_count = xenvif_get_sset_count, + .get_ethtool_stats = xenvif_get_ethtool_stats, + .get_strings = xenvif_get_strings, +}; + +static struct net_device_ops xenvif_netdev_ops = { + .ndo_start_xmit = xenvif_start_xmit, + .ndo_get_stats = xenvif_get_stats, + .ndo_open = xenvif_open, + .ndo_stop = xenvif_close, + .ndo_change_mtu = xenvif_change_mtu, +}; + +struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, + unsigned int handle) +{ + int err; + struct net_device *dev; + struct xenvif *vif; + char name[IFNAMSIZ] = {}; + + snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); + dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup); + if (dev == NULL) { + pr_warn("Could not allocate netdev\n"); + return ERR_PTR(-ENOMEM); + } + + SET_NETDEV_DEV(dev, parent); + + vif = netdev_priv(dev); + 
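xenvif_set_features() above derives dev->features from what the frontend advertises (can_sg, gso/gso_prefix, csum) and then masks off whatever the admin disabled through ethtool; the set_tx_csum/set_sg/set_tso handlers only flip bits in features_disabled and recompute. A standalone model of that interaction, using plain flag bits instead of netdev feature constants (illustrative only):

#include <stdio.h>

#define F_SG   0x1
#define F_TSO  0x2
#define F_CSUM 0x4

struct vif_model {
        int can_sg, gso, csum;           /* what the frontend supports */
        unsigned int features_disabled;  /* what ethtool turned off */
};

static unsigned int effective_features(const struct vif_model *v)
{
        unsigned int f = 0;

        if (v->can_sg)
                f |= F_SG;
        if (v->gso)
                f |= F_TSO;
        if (v->csum)
                f |= F_CSUM;
        return f & ~v->features_disabled;     /* capability minus admin mask */
}

int main(void)
{
        struct vif_model v = { .can_sg = 1, .gso = 1, .csum = 1 };

        printf("%#x\n", effective_features(&v));   /* 0x7: SG, TSO, CSUM */
        v.features_disabled |= F_TSO;              /* like "ethtool -K ... tso off" */
        printf("%#x\n", effective_features(&v));   /* 0x5: TSO masked out */
        return 0;
}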
vif->domid = domid; + vif->handle = handle; + vif->netbk = NULL; + vif->can_sg = 1; + vif->csum = 1; + atomic_set(&vif->refcnt, 1); + init_waitqueue_head(&vif->waiting_to_free); + vif->dev = dev; + INIT_LIST_HEAD(&vif->schedule_list); + INIT_LIST_HEAD(&vif->notify_list); + + vif->credit_bytes = vif->remaining_credit = ~0UL; + vif->credit_usec = 0UL; + init_timer(&vif->credit_timeout); + /* Initialize 'expires' now: it's used to track the credit window. */ + vif->credit_timeout.expires = jiffies; + + dev->netdev_ops = &xenvif_netdev_ops; + xenvif_set_features(vif); + SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops); + + dev->tx_queue_len = XENVIF_QUEUE_LENGTH; + + /* + * Initialise a dummy MAC address. We choose the numerically + * largest non-broadcast address to prevent the address getting + * stolen by an Ethernet bridge for STP purposes. + * (FE:FF:FF:FF:FF:FF) + */ + memset(dev->dev_addr, 0xFF, ETH_ALEN); + dev->dev_addr[0] &= ~0x01; + + netif_carrier_off(dev); + + err = register_netdev(dev); + if (err) { + netdev_warn(dev, "Could not register device: err=%d\n", err); + free_netdev(dev); + return ERR_PTR(err); + } + + netdev_dbg(dev, "Successfully created xenvif\n"); + return vif; +} + +int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, + unsigned long rx_ring_ref, unsigned int evtchn) +{ + int err = -ENOMEM; + + /* Already connected through? */ + if (vif->irq) + return 0; + + xenvif_set_features(vif); + + err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref); + if (err < 0) + goto err; + + err = bind_interdomain_evtchn_to_irqhandler( + vif->domid, evtchn, xenvif_interrupt, 0, + vif->dev->name, vif); + if (err < 0) + goto err_unmap; + vif->irq = err; + disable_irq(vif->irq); + + xenvif_get(vif); + + rtnl_lock(); + netif_carrier_on(vif->dev); + if (netif_running(vif->dev)) + xenvif_up(vif); + rtnl_unlock(); + + return 0; +err_unmap: + xen_netbk_unmap_frontend_rings(vif); +err: + return err; +} + +void xenvif_disconnect(struct xenvif *vif) +{ + struct net_device *dev = vif->dev; + if (netif_carrier_ok(dev)) { + rtnl_lock(); + netif_carrier_off(dev); /* discard queued packets */ + if (netif_running(dev)) + xenvif_down(vif); + rtnl_unlock(); + xenvif_put(vif); + } + + atomic_dec(&vif->refcnt); + wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0); + + del_timer_sync(&vif->credit_timeout); + + if (vif->irq) + unbind_from_irqhandler(vif->irq, vif); + + unregister_netdev(vif->dev); + + xen_netbk_unmap_frontend_rings(vif); + + free_netdev(vif->dev); +} diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c new file mode 100644 index 000000000000..0e4851b8a773 --- /dev/null +++ b/drivers/net/xen-netback/netback.c @@ -0,0 +1,1745 @@ +/* + * Back-end of the driver for virtual network devices. This portion of the + * driver exports a 'unified' network-device interface that can be accessed + * by any operating system that implements a compatible front end. 
A + * reference front-end implementation can be found in: + * drivers/net/xen-netfront.c + * + * Copyright (c) 2002-2005, K A Fraser + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "common.h" + +#include <linux/kthread.h> +#include <linux/if_vlan.h> +#include <linux/udp.h> + +#include <net/tcp.h> + +#include <xen/events.h> +#include <xen/interface/memory.h> + +#include <asm/xen/hypercall.h> +#include <asm/xen/page.h> + +struct pending_tx_info { + struct xen_netif_tx_request req; + struct xenvif *vif; +}; +typedef unsigned int pending_ring_idx_t; + +struct netbk_rx_meta { + int id; + int size; + int gso_size; +}; + +#define MAX_PENDING_REQS 256 + +#define MAX_BUFFER_OFFSET PAGE_SIZE + +/* extra field used in struct page */ +union page_ext { + struct { +#if BITS_PER_LONG < 64 +#define IDX_WIDTH 8 +#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH) + unsigned int group:GROUP_WIDTH; + unsigned int idx:IDX_WIDTH; +#else + unsigned int group, idx; +#endif + } e; + void *mapping; +}; + +struct xen_netbk { + wait_queue_head_t wq; + struct task_struct *task; + + struct sk_buff_head rx_queue; + struct sk_buff_head tx_queue; + + struct timer_list net_timer; + + struct page *mmap_pages[MAX_PENDING_REQS]; + + pending_ring_idx_t pending_prod; + pending_ring_idx_t pending_cons; + struct list_head net_schedule_list; + + /* Protect the net_schedule_list in netif. */ + spinlock_t net_schedule_list_lock; + + atomic_t netfront_count; + + struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; + struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; + + u16 pending_ring[MAX_PENDING_REQS]; + + /* + * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each + * head/fragment page uses 2 copy operations because it + * straddles two buffers in the frontend. 
+ */ + struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; + struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; +}; + +static struct xen_netbk *xen_netbk; +static int xen_netbk_group_nr; + +void xen_netbk_add_xenvif(struct xenvif *vif) +{ + int i; + int min_netfront_count; + int min_group = 0; + struct xen_netbk *netbk; + + min_netfront_count = atomic_read(&xen_netbk[0].netfront_count); + for (i = 0; i < xen_netbk_group_nr; i++) { + int netfront_count = atomic_read(&xen_netbk[i].netfront_count); + if (netfront_count < min_netfront_count) { + min_group = i; + min_netfront_count = netfront_count; + } + } + + netbk = &xen_netbk[min_group]; + + vif->netbk = netbk; + atomic_inc(&netbk->netfront_count); +} + +void xen_netbk_remove_xenvif(struct xenvif *vif) +{ + struct xen_netbk *netbk = vif->netbk; + vif->netbk = NULL; + atomic_dec(&netbk->netfront_count); +} + +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx); +static void make_tx_response(struct xenvif *vif, + struct xen_netif_tx_request *txp, + s8 st); +static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, + u16 id, + s8 st, + u16 offset, + u16 size, + u16 flags); + +static inline unsigned long idx_to_pfn(struct xen_netbk *netbk, + unsigned int idx) +{ + return page_to_pfn(netbk->mmap_pages[idx]); +} + +static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk, + unsigned int idx) +{ + return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx)); +} + +/* extra field used in struct page */ +static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk, + unsigned int idx) +{ + unsigned int group = netbk - xen_netbk; + union page_ext ext = { .e = { .group = group + 1, .idx = idx } }; + + BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping)); + pg->mapping = ext.mapping; +} + +static int get_page_ext(struct page *pg, + unsigned int *pgroup, unsigned int *pidx) +{ + union page_ext ext = { .mapping = pg->mapping }; + struct xen_netbk *netbk; + unsigned int group, idx; + + group = ext.e.group - 1; + + if (group < 0 || group >= xen_netbk_group_nr) + return 0; + + netbk = &xen_netbk[group]; + + idx = ext.e.idx; + + if ((idx < 0) || (idx >= MAX_PENDING_REQS)) + return 0; + + if (netbk->mmap_pages[idx] != pg) + return 0; + + *pgroup = group; + *pidx = idx; + + return 1; +} + +/* + * This is the amount of packet we copy rather than map, so that the + * guest can't fiddle with the contents of the headers while we do + * packet processing on them (netfilter, routing, etc). 
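set_page_ext()/get_page_ext() above record which netback group and pending-request index a granted page belongs to by overlaying two bitfields on the pointer-sized page->mapping slot; the group is stored off by one so that a page whose mapping is still zero can never be mistaken for group 0. A standalone demonstration of the same union trick (widths match the BITS_PER_LONG < 64 case; purely illustrative):

#include <stdio.h>

/* Same idea as union page_ext: bitfields overlaid on a pointer-sized field. */
union ext {
        struct {
                unsigned int group:24;
                unsigned int idx:8;
        } e;
        void *mapping;
};

int main(void)
{
        union ext in = { .e = { .group = 3 + 1, .idx = 200 } };  /* store group+1 */
        union ext out = { .mapping = in.mapping };               /* round-trip via the pointer slot */

        printf("group %d idx %d\n", (int)out.e.group - 1, (int)out.e.idx);  /* group 3 idx 200 */
        return 0;
}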
+ */ +#define PKT_PROT_LEN (ETH_HLEN + \ + VLAN_HLEN + \ + sizeof(struct iphdr) + MAX_IPOPTLEN + \ + sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE) + +static inline pending_ring_idx_t pending_index(unsigned i) +{ + return i & (MAX_PENDING_REQS-1); +} + +static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk) +{ + return MAX_PENDING_REQS - + netbk->pending_prod + netbk->pending_cons; +} + +static void xen_netbk_kick_thread(struct xen_netbk *netbk) +{ + wake_up(&netbk->wq); +} + +static int max_required_rx_slots(struct xenvif *vif) +{ + int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); + + if (vif->can_sg || vif->gso || vif->gso_prefix) + max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ + + return max; +} + +int xen_netbk_rx_ring_full(struct xenvif *vif) +{ + RING_IDX peek = vif->rx_req_cons_peek; + RING_IDX needed = max_required_rx_slots(vif); + + return ((vif->rx.sring->req_prod - peek) < needed) || + ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed); +} + +int xen_netbk_must_stop_queue(struct xenvif *vif) +{ + if (!xen_netbk_rx_ring_full(vif)) + return 0; + + vif->rx.sring->req_event = vif->rx_req_cons_peek + + max_required_rx_slots(vif); + mb(); /* request notification /then/ check the queue */ + + return xen_netbk_rx_ring_full(vif); +} + +/* + * Returns true if we should start a new receive buffer instead of + * adding 'size' bytes to a buffer which currently contains 'offset' + * bytes. + */ +static bool start_new_rx_buffer(int offset, unsigned long size, int head) +{ + /* simple case: we have completely filled the current buffer. */ + if (offset == MAX_BUFFER_OFFSET) + return true; + + /* + * complex case: start a fresh buffer if the current frag + * would overflow the current buffer but only if: + * (i) this frag would fit completely in the next buffer + * and (ii) there is already some data in the current buffer + * and (iii) this is not the head buffer. + * + * Where: + * - (i) stops us splitting a frag into two copies + * unless the frag is too large for a single buffer. + * - (ii) stops us from leaving a buffer pointlessly empty. + * - (iii) stops us leaving the first buffer + * empty. Strictly speaking this is already covered + * by (ii) but is explicitly checked because + * netfront relies on the first buffer being + * non-empty and can crash otherwise. + * + * This means we will effectively linearise small + * frags but do not needlessly split large buffers + * into multiple copies tend to give large frags their + * own buffers as before. + */ + if ((offset + size > MAX_BUFFER_OFFSET) && + (size <= MAX_BUFFER_OFFSET) && offset && !head) + return true; + + return false; +} + +/* + * Figure out how many ring slots we're going to need to send @skb to + * the guest. This function is essentially a dry run of + * netbk_gop_frag_copy. 
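xen_netbk_rx_ring_full() above checks both how many frontend rx requests remain unearmarked (req_prod minus rx_req_cons_peek) and how much response space is left (rsp_prod_pvt plus the ring size, minus the peek index), because queued-but-unsent skbs have already reserved slots via the peek counter. A standalone model of that check with free-running unsigned counters (ring size and numbers below are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 256   /* stand-in for XEN_NETIF_RX_RING_SIZE */

/* Model of xen_netbk_rx_ring_full(): 'peek' counts rx requests already
 * earmarked for skbs waiting in the internal queue, so the test is against
 * what will remain once those are consumed. Unsigned subtraction handles
 * wraparound the same way the ring index arithmetic does. */
static bool rx_ring_full(unsigned int req_prod, unsigned int rsp_prod_pvt,
                         unsigned int peek, unsigned int needed)
{
        return (req_prod - peek < needed) ||
               (rsp_prod_pvt + RING_SIZE - peek < needed);
}

int main(void)
{
        /* 300 requests posted, 290 already earmarked, 280 responses produced:
         * only 10 unearmarked requests remain, so 19 needed slots do not fit. */
        printf("%d\n", rx_ring_full(300, 280, 290, 19));   /* 1: full, stop queue */
        printf("%d\n", rx_ring_full(300, 280, 285, 10));   /* 0: still room */
        return 0;
}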
+ */ +unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb) +{ + unsigned int count; + int i, copy_off; + + count = DIV_ROUND_UP( + offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE); + + copy_off = skb_headlen(skb) % PAGE_SIZE; + + if (skb_shinfo(skb)->gso_size) + count++; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + unsigned long size = skb_shinfo(skb)->frags[i].size; + unsigned long bytes; + while (size > 0) { + BUG_ON(copy_off > MAX_BUFFER_OFFSET); + + if (start_new_rx_buffer(copy_off, size, 0)) { + count++; + copy_off = 0; + } + + bytes = size; + if (copy_off + bytes > MAX_BUFFER_OFFSET) + bytes = MAX_BUFFER_OFFSET - copy_off; + + copy_off += bytes; + size -= bytes; + } + } + return count; +} + +struct netrx_pending_operations { + unsigned copy_prod, copy_cons; + unsigned meta_prod, meta_cons; + struct gnttab_copy *copy; + struct netbk_rx_meta *meta; + int copy_off; + grant_ref_t copy_gref; +}; + +static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif, + struct netrx_pending_operations *npo) +{ + struct netbk_rx_meta *meta; + struct xen_netif_rx_request *req; + + req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); + + meta = npo->meta + npo->meta_prod++; + meta->gso_size = 0; + meta->size = 0; + meta->id = req->id; + + npo->copy_off = 0; + npo->copy_gref = req->gref; + + return meta; +} + +/* + * Set up the grant operations for this fragment. If it's a flipping + * interface, we also set up the unmap request from here. + */ +static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, + struct netrx_pending_operations *npo, + struct page *page, unsigned long size, + unsigned long offset, int *head) +{ + struct gnttab_copy *copy_gop; + struct netbk_rx_meta *meta; + /* + * These variables a used iff get_page_ext returns true, + * in which case they are guaranteed to be initialized. + */ + unsigned int uninitialized_var(group), uninitialized_var(idx); + int foreign = get_page_ext(page, &group, &idx); + unsigned long bytes; + + /* Data must not cross a page boundary. */ + BUG_ON(size + offset > PAGE_SIZE); + + meta = npo->meta + npo->meta_prod - 1; + + while (size > 0) { + BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET); + + if (start_new_rx_buffer(npo->copy_off, size, *head)) { + /* + * Netfront requires there to be some data in the head + * buffer. + */ + BUG_ON(*head); + + meta = get_next_rx_buffer(vif, npo); + } + + bytes = size; + if (npo->copy_off + bytes > MAX_BUFFER_OFFSET) + bytes = MAX_BUFFER_OFFSET - npo->copy_off; + + copy_gop = npo->copy + npo->copy_prod++; + copy_gop->flags = GNTCOPY_dest_gref; + if (foreign) { + struct xen_netbk *netbk = &xen_netbk[group]; + struct pending_tx_info *src_pend; + + src_pend = &netbk->pending_tx_info[idx]; + + copy_gop->source.domid = src_pend->vif->domid; + copy_gop->source.u.ref = src_pend->req.gref; + copy_gop->flags |= GNTCOPY_source_gref; + } else { + void *vaddr = page_address(page); + copy_gop->source.domid = DOMID_SELF; + copy_gop->source.u.gmfn = virt_to_mfn(vaddr); + } + copy_gop->source.offset = offset; + copy_gop->dest.domid = vif->domid; + + copy_gop->dest.offset = npo->copy_off; + copy_gop->dest.u.ref = npo->copy_gref; + copy_gop->len = bytes; + + npo->copy_off += bytes; + meta->size += bytes; + + offset += bytes; + size -= bytes; + + /* Leave a gap for the GSO descriptor. */ + if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) + vif->rx.req_cons++; + + *head = 0; /* There must be something in this buffer now. 
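Later chunks may be appended to it or may start a fresh buffer as needed.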
*/ + + } +} + +/* + * Prepare an SKB to be transmitted to the frontend. + * + * This function is responsible for allocating grant operations, meta + * structures, etc. + * + * It returns the number of meta structures consumed. The number of + * ring slots used is always equal to the number of meta slots used + * plus the number of GSO descriptors used. Currently, we use either + * zero GSO descriptors (for non-GSO packets) or one descriptor (for + * frontend-side LRO). + */ +static int netbk_gop_skb(struct sk_buff *skb, + struct netrx_pending_operations *npo) +{ + struct xenvif *vif = netdev_priv(skb->dev); + int nr_frags = skb_shinfo(skb)->nr_frags; + int i; + struct xen_netif_rx_request *req; + struct netbk_rx_meta *meta; + unsigned char *data; + int head = 1; + int old_meta_prod; + + old_meta_prod = npo->meta_prod; + + /* Set up a GSO prefix descriptor, if necessary */ + if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { + req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); + meta = npo->meta + npo->meta_prod++; + meta->gso_size = skb_shinfo(skb)->gso_size; + meta->size = 0; + meta->id = req->id; + } + + req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); + meta = npo->meta + npo->meta_prod++; + + if (!vif->gso_prefix) + meta->gso_size = skb_shinfo(skb)->gso_size; + else + meta->gso_size = 0; + + meta->size = 0; + meta->id = req->id; + npo->copy_off = 0; + npo->copy_gref = req->gref; + + data = skb->data; + while (data < skb_tail_pointer(skb)) { + unsigned int offset = offset_in_page(data); + unsigned int len = PAGE_SIZE - offset; + + if (data + len > skb_tail_pointer(skb)) + len = skb_tail_pointer(skb) - data; + + netbk_gop_frag_copy(vif, skb, npo, + virt_to_page(data), len, offset, &head); + data += len; + } + + for (i = 0; i < nr_frags; i++) { + netbk_gop_frag_copy(vif, skb, npo, + skb_shinfo(skb)->frags[i].page, + skb_shinfo(skb)->frags[i].size, + skb_shinfo(skb)->frags[i].page_offset, + &head); + } + + return npo->meta_prod - old_meta_prod; +} + +/* + * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was + * used to set up the operations on the top of + * netrx_pending_operations, which have since been done. Check that + * they didn't give any errors and advance over them. 
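+ * A single failed copy downgrades the whole packet's status to XEN_NETIF_RSP_ERROR.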
+ */ +static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots, + struct netrx_pending_operations *npo) +{ + struct gnttab_copy *copy_op; + int status = XEN_NETIF_RSP_OKAY; + int i; + + for (i = 0; i < nr_meta_slots; i++) { + copy_op = npo->copy + npo->copy_cons++; + if (copy_op->status != GNTST_okay) { + netdev_dbg(vif->dev, + "Bad status %d from copy to DOM%d.\n", + copy_op->status, vif->domid); + status = XEN_NETIF_RSP_ERROR; + } + } + + return status; +} + +static void netbk_add_frag_responses(struct xenvif *vif, int status, + struct netbk_rx_meta *meta, + int nr_meta_slots) +{ + int i; + unsigned long offset; + + /* No fragments used */ + if (nr_meta_slots <= 1) + return; + + nr_meta_slots--; + + for (i = 0; i < nr_meta_slots; i++) { + int flags; + if (i == nr_meta_slots - 1) + flags = 0; + else + flags = XEN_NETRXF_more_data; + + offset = 0; + make_rx_response(vif, meta[i].id, status, offset, + meta[i].size, flags); + } +} + +struct skb_cb_overlay { + int meta_slots_used; +}; + +static void xen_netbk_rx_action(struct xen_netbk *netbk) +{ + struct xenvif *vif = NULL, *tmp; + s8 status; + u16 irq, flags; + struct xen_netif_rx_response *resp; + struct sk_buff_head rxq; + struct sk_buff *skb; + LIST_HEAD(notify); + int ret; + int nr_frags; + int count; + unsigned long offset; + struct skb_cb_overlay *sco; + + struct netrx_pending_operations npo = { + .copy = netbk->grant_copy_op, + .meta = netbk->meta, + }; + + skb_queue_head_init(&rxq); + + count = 0; + + while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) { + vif = netdev_priv(skb->dev); + nr_frags = skb_shinfo(skb)->nr_frags; + + sco = (struct skb_cb_overlay *)skb->cb; + sco->meta_slots_used = netbk_gop_skb(skb, &npo); + + count += nr_frags + 1; + + __skb_queue_tail(&rxq, skb); + + /* Filled the batch queue? */ + if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE) + break; + } + + BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta)); + + if (!npo.copy_prod) + return; + + BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op)); + ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op, + npo.copy_prod); + BUG_ON(ret != 0); + + while ((skb = __skb_dequeue(&rxq)) != NULL) { + sco = (struct skb_cb_overlay *)skb->cb; + + vif = netdev_priv(skb->dev); + + if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) { + resp = RING_GET_RESPONSE(&vif->rx, + vif->rx.rsp_prod_pvt++); + + resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data; + + resp->offset = netbk->meta[npo.meta_cons].gso_size; + resp->id = netbk->meta[npo.meta_cons].id; + resp->status = sco->meta_slots_used; + + npo.meta_cons++; + sco->meta_slots_used--; + } + + + vif->dev->stats.tx_bytes += skb->len; + vif->dev->stats.tx_packets++; + + status = netbk_check_gop(vif, sco->meta_slots_used, &npo); + + if (sco->meta_slots_used == 1) + flags = 0; + else + flags = XEN_NETRXF_more_data; + + if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ + flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated; + else if (skb->ip_summed == CHECKSUM_UNNECESSARY) + /* remote but checksummed. 
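Flag the data as already validated so the frontend can skip checksum verification.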
*/ + flags |= XEN_NETRXF_data_validated; + + offset = 0; + resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id, + status, offset, + netbk->meta[npo.meta_cons].size, + flags); + + if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { + struct xen_netif_extra_info *gso = + (struct xen_netif_extra_info *) + RING_GET_RESPONSE(&vif->rx, + vif->rx.rsp_prod_pvt++); + + resp->flags |= XEN_NETRXF_extra_info; + + gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size; + gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; + gso->u.gso.pad = 0; + gso->u.gso.features = 0; + + gso->type = XEN_NETIF_EXTRA_TYPE_GSO; + gso->flags = 0; + } + + netbk_add_frag_responses(vif, status, + netbk->meta + npo.meta_cons + 1, + sco->meta_slots_used); + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret); + irq = vif->irq; + if (ret && list_empty(&vif->notify_list)) + list_add_tail(&vif->notify_list, ¬ify); + + xenvif_notify_tx_completion(vif); + + xenvif_put(vif); + npo.meta_cons += sco->meta_slots_used; + dev_kfree_skb(skb); + } + + list_for_each_entry_safe(vif, tmp, ¬ify, notify_list) { + notify_remote_via_irq(vif->irq); + list_del_init(&vif->notify_list); + } + + /* More work to do? */ + if (!skb_queue_empty(&netbk->rx_queue) && + !timer_pending(&netbk->net_timer)) + xen_netbk_kick_thread(netbk); +} + +void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb) +{ + struct xen_netbk *netbk = vif->netbk; + + skb_queue_tail(&netbk->rx_queue, skb); + + xen_netbk_kick_thread(netbk); +} + +static void xen_netbk_alarm(unsigned long data) +{ + struct xen_netbk *netbk = (struct xen_netbk *)data; + xen_netbk_kick_thread(netbk); +} + +static int __on_net_schedule_list(struct xenvif *vif) +{ + return !list_empty(&vif->schedule_list); +} + +/* Must be called with net_schedule_list_lock held */ +static void remove_from_net_schedule_list(struct xenvif *vif) +{ + if (likely(__on_net_schedule_list(vif))) { + list_del_init(&vif->schedule_list); + xenvif_put(vif); + } +} + +static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk) +{ + struct xenvif *vif = NULL; + + spin_lock_irq(&netbk->net_schedule_list_lock); + if (list_empty(&netbk->net_schedule_list)) + goto out; + + vif = list_first_entry(&netbk->net_schedule_list, + struct xenvif, schedule_list); + if (!vif) + goto out; + + xenvif_get(vif); + + remove_from_net_schedule_list(vif); +out: + spin_unlock_irq(&netbk->net_schedule_list_lock); + return vif; +} + +void xen_netbk_schedule_xenvif(struct xenvif *vif) +{ + unsigned long flags; + struct xen_netbk *netbk = vif->netbk; + + if (__on_net_schedule_list(vif)) + goto kick; + + spin_lock_irqsave(&netbk->net_schedule_list_lock, flags); + if (!__on_net_schedule_list(vif) && + likely(xenvif_schedulable(vif))) { + list_add_tail(&vif->schedule_list, &netbk->net_schedule_list); + xenvif_get(vif); + } + spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags); + +kick: + smp_mb(); + if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) && + !list_empty(&netbk->net_schedule_list)) + xen_netbk_kick_thread(netbk); +} + +void xen_netbk_deschedule_xenvif(struct xenvif *vif) +{ + struct xen_netbk *netbk = vif->netbk; + spin_lock_irq(&netbk->net_schedule_list_lock); + remove_from_net_schedule_list(vif); + spin_unlock_irq(&netbk->net_schedule_list_lock); +} + +void xen_netbk_check_rx_xenvif(struct xenvif *vif) +{ + int more_to_do; + + RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); + + if (more_to_do) + xen_netbk_schedule_xenvif(vif); +} + +static void tx_add_credit(struct xenvif *vif) +{ + unsigned long 
max_burst, max_credit; + + /* + * Allow a burst big enough to transmit a jumbo packet of up to 128kB. + * Otherwise the interface can seize up due to insufficient credit. + */ + max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size; + max_burst = min(max_burst, 131072UL); + max_burst = max(max_burst, vif->credit_bytes); + + /* Take care that adding a new chunk of credit doesn't wrap to zero. */ + max_credit = vif->remaining_credit + vif->credit_bytes; + if (max_credit < vif->remaining_credit) + max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */ + + vif->remaining_credit = min(max_credit, max_burst); +} + +static void tx_credit_callback(unsigned long data) +{ + struct xenvif *vif = (struct xenvif *)data; + tx_add_credit(vif); + xen_netbk_check_rx_xenvif(vif); +} + +static void netbk_tx_err(struct xenvif *vif, + struct xen_netif_tx_request *txp, RING_IDX end) +{ + RING_IDX cons = vif->tx.req_cons; + + do { + make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); + if (cons >= end) + break; + txp = RING_GET_REQUEST(&vif->tx, cons++); + } while (1); + vif->tx.req_cons = cons; + xen_netbk_check_rx_xenvif(vif); + xenvif_put(vif); +} + +static int netbk_count_requests(struct xenvif *vif, + struct xen_netif_tx_request *first, + struct xen_netif_tx_request *txp, + int work_to_do) +{ + RING_IDX cons = vif->tx.req_cons; + int frags = 0; + + if (!(first->flags & XEN_NETTXF_more_data)) + return 0; + + do { + if (frags >= work_to_do) { + netdev_dbg(vif->dev, "Need more frags\n"); + return -frags; + } + + if (unlikely(frags >= MAX_SKB_FRAGS)) { + netdev_dbg(vif->dev, "Too many frags\n"); + return -frags; + } + + memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags), + sizeof(*txp)); + if (txp->size > first->size) { + netdev_dbg(vif->dev, "Frags galore\n"); + return -frags; + } + + first->size -= txp->size; + frags++; + + if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) { + netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n", + txp->offset, txp->size); + return -frags; + } + } while ((txp++)->flags & XEN_NETTXF_more_data); + return frags; +} + +static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk, + struct sk_buff *skb, + unsigned long pending_idx) +{ + struct page *page; + page = alloc_page(GFP_KERNEL|__GFP_COLD); + if (!page) + return NULL; + set_page_ext(page, netbk, pending_idx); + netbk->mmap_pages[pending_idx] = page; + return page; +} + +static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk, + struct xenvif *vif, + struct sk_buff *skb, + struct xen_netif_tx_request *txp, + struct gnttab_copy *gop) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + skb_frag_t *frags = shinfo->frags; + unsigned long pending_idx = *((u16 *)skb->data); + int i, start; + + /* Skip first skb fragment if it is on same page as header fragment. 
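Its data is covered by the grant copy already queued for the header request.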
*/ + start = ((unsigned long)shinfo->frags[0].page == pending_idx); + + for (i = start; i < shinfo->nr_frags; i++, txp++) { + struct page *page; + pending_ring_idx_t index; + struct pending_tx_info *pending_tx_info = + netbk->pending_tx_info; + + index = pending_index(netbk->pending_cons++); + pending_idx = netbk->pending_ring[index]; + page = xen_netbk_alloc_page(netbk, skb, pending_idx); + if (!page) + return NULL; + + netbk->mmap_pages[pending_idx] = page; + + gop->source.u.ref = txp->gref; + gop->source.domid = vif->domid; + gop->source.offset = txp->offset; + + gop->dest.u.gmfn = virt_to_mfn(page_address(page)); + gop->dest.domid = DOMID_SELF; + gop->dest.offset = txp->offset; + + gop->len = txp->size; + gop->flags = GNTCOPY_source_gref; + + gop++; + + memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp)); + xenvif_get(vif); + pending_tx_info[pending_idx].vif = vif; + frags[i].page = (void *)pending_idx; + } + + return gop; +} + +static int xen_netbk_tx_check_gop(struct xen_netbk *netbk, + struct sk_buff *skb, + struct gnttab_copy **gopp) +{ + struct gnttab_copy *gop = *gopp; + int pending_idx = *((u16 *)skb->data); + struct pending_tx_info *pending_tx_info = netbk->pending_tx_info; + struct xenvif *vif = pending_tx_info[pending_idx].vif; + struct xen_netif_tx_request *txp; + struct skb_shared_info *shinfo = skb_shinfo(skb); + int nr_frags = shinfo->nr_frags; + int i, err, start; + + /* Check status of header. */ + err = gop->status; + if (unlikely(err)) { + pending_ring_idx_t index; + index = pending_index(netbk->pending_prod++); + txp = &pending_tx_info[pending_idx].req; + make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); + netbk->pending_ring[index] = pending_idx; + xenvif_put(vif); + } + + /* Skip first skb fragment if it is on same page as header fragment. */ + start = ((unsigned long)shinfo->frags[0].page == pending_idx); + + for (i = start; i < nr_frags; i++) { + int j, newerr; + pending_ring_idx_t index; + + pending_idx = (unsigned long)shinfo->frags[i].page; + + /* Check error status: if okay then remember grant handle. */ + newerr = (++gop)->status; + if (likely(!newerr)) { + /* Had a previous error? Invalidate this fragment. */ + if (unlikely(err)) + xen_netbk_idx_release(netbk, pending_idx); + continue; + } + + /* Error on this fragment: respond to client with an error. */ + txp = &netbk->pending_tx_info[pending_idx].req; + make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR); + index = pending_index(netbk->pending_prod++); + netbk->pending_ring[index] = pending_idx; + xenvif_put(vif); + + /* Not the first error? Preceding frags already invalidated. */ + if (err) + continue; + + /* First error: invalidate header and preceding fragments. */ + pending_idx = *((u16 *)skb->data); + xen_netbk_idx_release(netbk, pending_idx); + for (j = start; j < i; j++) { + pending_idx = (unsigned long)shinfo->frags[i].page; + xen_netbk_idx_release(netbk, pending_idx); + } + + /* Remember the error: invalidate all subsequent fragments. 
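The remaining loop iterations release each of them as they are reached.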
*/ + err = newerr; + } + + *gopp = gop + 1; + return err; +} + +static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + int nr_frags = shinfo->nr_frags; + int i; + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = shinfo->frags + i; + struct xen_netif_tx_request *txp; + unsigned long pending_idx; + + pending_idx = (unsigned long)frag->page; + + txp = &netbk->pending_tx_info[pending_idx].req; + frag->page = virt_to_page(idx_to_kaddr(netbk, pending_idx)); + frag->size = txp->size; + frag->page_offset = txp->offset; + + skb->len += txp->size; + skb->data_len += txp->size; + skb->truesize += txp->size; + + /* Take an extra reference to offset xen_netbk_idx_release */ + get_page(netbk->mmap_pages[pending_idx]); + xen_netbk_idx_release(netbk, pending_idx); + } +} + +static int xen_netbk_get_extras(struct xenvif *vif, + struct xen_netif_extra_info *extras, + int work_to_do) +{ + struct xen_netif_extra_info extra; + RING_IDX cons = vif->tx.req_cons; + + do { + if (unlikely(work_to_do-- <= 0)) { + netdev_dbg(vif->dev, "Missing extra info\n"); + return -EBADR; + } + + memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons), + sizeof(extra)); + if (unlikely(!extra.type || + extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { + vif->tx.req_cons = ++cons; + netdev_dbg(vif->dev, + "Invalid extra type: %d\n", extra.type); + return -EINVAL; + } + + memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); + vif->tx.req_cons = ++cons; + } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); + + return work_to_do; +} + +static int netbk_set_skb_gso(struct xenvif *vif, + struct sk_buff *skb, + struct xen_netif_extra_info *gso) +{ + if (!gso->u.gso.size) { + netdev_dbg(vif->dev, "GSO size must not be zero.\n"); + return -EINVAL; + } + + /* Currently only TCPv4 S.O. is supported. */ + if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) { + netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type); + return -EINVAL; + } + + skb_shinfo(skb)->gso_size = gso->u.gso.size; + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + /* Header must be checked, and gso_segs computed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + + return 0; +} + +static int checksum_setup(struct xenvif *vif, struct sk_buff *skb) +{ + struct iphdr *iph; + unsigned char *th; + int err = -EPROTO; + int recalculate_partial_csum = 0; + + /* + * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy + * peers can fail to set NETRXF_csum_blank when sending a GSO + * frame. In this case force the SKB to CHECKSUM_PARTIAL and + * recalculate the partial checksum. + */ + if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { + vif->rx_gso_checksum_fixup++; + skb->ip_summed = CHECKSUM_PARTIAL; + recalculate_partial_csum = 1; + } + + /* A non-CHECKSUM_PARTIAL SKB does not require setup. 
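Only CHECKSUM_PARTIAL skbs need csum_start and csum_offset pointed at the transport header.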
*/ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->protocol != htons(ETH_P_IP)) + goto out; + + iph = (void *)skb->data; + th = skb->data + 4 * iph->ihl; + if (th >= skb_tail_pointer(skb)) + goto out; + + skb->csum_start = th - skb->head; + switch (iph->protocol) { + case IPPROTO_TCP: + skb->csum_offset = offsetof(struct tcphdr, check); + + if (recalculate_partial_csum) { + struct tcphdr *tcph = (struct tcphdr *)th; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - iph->ihl*4, + IPPROTO_TCP, 0); + } + break; + case IPPROTO_UDP: + skb->csum_offset = offsetof(struct udphdr, check); + + if (recalculate_partial_csum) { + struct udphdr *udph = (struct udphdr *)th; + udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - iph->ihl*4, + IPPROTO_UDP, 0); + } + break; + default: + if (net_ratelimit()) + netdev_err(vif->dev, + "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n", + iph->protocol); + goto out; + } + + if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb)) + goto out; + + err = 0; + +out: + return err; +} + +static bool tx_credit_exceeded(struct xenvif *vif, unsigned size) +{ + unsigned long now = jiffies; + unsigned long next_credit = + vif->credit_timeout.expires + + msecs_to_jiffies(vif->credit_usec / 1000); + + /* Timer could already be pending in rare cases. */ + if (timer_pending(&vif->credit_timeout)) + return true; + + /* Passed the point where we can replenish credit? */ + if (time_after_eq(now, next_credit)) { + vif->credit_timeout.expires = now; + tx_add_credit(vif); + } + + /* Still too big to send right now? Set a callback. */ + if (size > vif->remaining_credit) { + vif->credit_timeout.data = + (unsigned long)vif; + vif->credit_timeout.function = + tx_credit_callback; + mod_timer(&vif->credit_timeout, + next_credit); + + return true; + } + + return false; +} + +static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk) +{ + struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop; + struct sk_buff *skb; + int ret; + + while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && + !list_empty(&netbk->net_schedule_list)) { + struct xenvif *vif; + struct xen_netif_tx_request txreq; + struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS]; + struct page *page; + struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; + u16 pending_idx; + RING_IDX idx; + int work_to_do; + unsigned int data_len; + pending_ring_idx_t index; + + /* Get a netif from the list with work to do. */ + vif = poll_net_schedule_list(netbk); + if (!vif) + continue; + + RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do); + if (!work_to_do) { + xenvif_put(vif); + continue; + } + + idx = vif->tx.req_cons; + rmb(); /* Ensure that we see the request before we copy it. */ + memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq)); + + /* Credit-based scheduling. 
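Packets that exceed the vif's remaining credit are deferred until tx_add_credit() replenishes it.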
*/ + if (txreq.size > vif->remaining_credit && + tx_credit_exceeded(vif, txreq.size)) { + xenvif_put(vif); + continue; + } + + vif->remaining_credit -= txreq.size; + + work_to_do--; + vif->tx.req_cons = ++idx; + + memset(extras, 0, sizeof(extras)); + if (txreq.flags & XEN_NETTXF_extra_info) { + work_to_do = xen_netbk_get_extras(vif, extras, + work_to_do); + idx = vif->tx.req_cons; + if (unlikely(work_to_do < 0)) { + netbk_tx_err(vif, &txreq, idx); + continue; + } + } + + ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do); + if (unlikely(ret < 0)) { + netbk_tx_err(vif, &txreq, idx - ret); + continue; + } + idx += ret; + + if (unlikely(txreq.size < ETH_HLEN)) { + netdev_dbg(vif->dev, + "Bad packet size: %d\n", txreq.size); + netbk_tx_err(vif, &txreq, idx); + continue; + } + + /* No crossing a page as the payload mustn't fragment. */ + if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) { + netdev_dbg(vif->dev, + "txreq.offset: %x, size: %u, end: %lu\n", + txreq.offset, txreq.size, + (txreq.offset&~PAGE_MASK) + txreq.size); + netbk_tx_err(vif, &txreq, idx); + continue; + } + + index = pending_index(netbk->pending_cons); + pending_idx = netbk->pending_ring[index]; + + data_len = (txreq.size > PKT_PROT_LEN && + ret < MAX_SKB_FRAGS) ? + PKT_PROT_LEN : txreq.size; + + skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(skb == NULL)) { + netdev_dbg(vif->dev, + "Can't allocate a skb in start_xmit.\n"); + netbk_tx_err(vif, &txreq, idx); + break; + } + + /* Packets passed to netif_rx() must have some headroom. */ + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { + struct xen_netif_extra_info *gso; + gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; + + if (netbk_set_skb_gso(vif, skb, gso)) { + kfree_skb(skb); + netbk_tx_err(vif, &txreq, idx); + continue; + } + } + + /* XXX could copy straight to head */ + page = xen_netbk_alloc_page(netbk, skb, pending_idx); + if (!page) { + kfree_skb(skb); + netbk_tx_err(vif, &txreq, idx); + continue; + } + + netbk->mmap_pages[pending_idx] = page; + + gop->source.u.ref = txreq.gref; + gop->source.domid = vif->domid; + gop->source.offset = txreq.offset; + + gop->dest.u.gmfn = virt_to_mfn(page_address(page)); + gop->dest.domid = DOMID_SELF; + gop->dest.offset = txreq.offset; + + gop->len = txreq.size; + gop->flags = GNTCOPY_source_gref; + + gop++; + + memcpy(&netbk->pending_tx_info[pending_idx].req, + &txreq, sizeof(txreq)); + netbk->pending_tx_info[pending_idx].vif = vif; + *((u16 *)skb->data) = pending_idx; + + __skb_put(skb, data_len); + + skb_shinfo(skb)->nr_frags = ret; + if (data_len < txreq.size) { + skb_shinfo(skb)->nr_frags++; + skb_shinfo(skb)->frags[0].page = + (void *)(unsigned long)pending_idx; + } else { + /* Discriminate from any valid pending_idx value. 
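~0UL can never collide with a real pending index.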
*/ + skb_shinfo(skb)->frags[0].page = (void *)~0UL; + } + + __skb_queue_tail(&netbk->tx_queue, skb); + + netbk->pending_cons++; + + request_gop = xen_netbk_get_requests(netbk, vif, + skb, txfrags, gop); + if (request_gop == NULL) { + kfree_skb(skb); + netbk_tx_err(vif, &txreq, idx); + continue; + } + gop = request_gop; + + vif->tx.req_cons = idx; + xen_netbk_check_rx_xenvif(vif); + + if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops)) + break; + } + + return gop - netbk->tx_copy_ops; +} + +static void xen_netbk_tx_submit(struct xen_netbk *netbk) +{ + struct gnttab_copy *gop = netbk->tx_copy_ops; + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) { + struct xen_netif_tx_request *txp; + struct xenvif *vif; + u16 pending_idx; + unsigned data_len; + + pending_idx = *((u16 *)skb->data); + vif = netbk->pending_tx_info[pending_idx].vif; + txp = &netbk->pending_tx_info[pending_idx].req; + + /* Check the remap error code. */ + if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) { + netdev_dbg(vif->dev, "netback grant failed.\n"); + skb_shinfo(skb)->nr_frags = 0; + kfree_skb(skb); + continue; + } + + data_len = skb->len; + memcpy(skb->data, + (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset), + data_len); + if (data_len < txp->size) { + /* Append the packet payload as a fragment. */ + txp->offset += data_len; + txp->size -= data_len; + } else { + /* Schedule a response immediately. */ + xen_netbk_idx_release(netbk, pending_idx); + } + + if (txp->flags & XEN_NETTXF_csum_blank) + skb->ip_summed = CHECKSUM_PARTIAL; + else if (txp->flags & XEN_NETTXF_data_validated) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + xen_netbk_fill_frags(netbk, skb); + + /* + * If the initial fragment was < PKT_PROT_LEN then + * pull through some bytes from the other fragments to + * increase the linear region to PKT_PROT_LEN bytes. + */ + if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) { + int target = min_t(int, skb->len, PKT_PROT_LEN); + __pskb_pull_tail(skb, target - skb_headlen(skb)); + } + + skb->dev = vif->dev; + skb->protocol = eth_type_trans(skb, skb->dev); + + if (checksum_setup(vif, skb)) { + netdev_dbg(vif->dev, + "Can't setup checksum in net_tx_action\n"); + kfree_skb(skb); + continue; + } + + vif->dev->stats.rx_bytes += skb->len; + vif->dev->stats.rx_packets++; + + xenvif_receive_skb(vif, skb); + } +} + +/* Called after netfront has transmitted */ +static void xen_netbk_tx_action(struct xen_netbk *netbk) +{ + unsigned nr_gops; + int ret; + + nr_gops = xen_netbk_tx_build_gops(netbk); + + if (nr_gops == 0) + return; + ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, + netbk->tx_copy_ops, nr_gops); + BUG_ON(ret); + + xen_netbk_tx_submit(netbk); + +} + +static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx) +{ + struct xenvif *vif; + struct pending_tx_info *pending_tx_info; + pending_ring_idx_t index; + + /* Already complete? 
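A cleared mmap_pages entry means this index has already been released.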
*/ + if (netbk->mmap_pages[pending_idx] == NULL) + return; + + pending_tx_info = &netbk->pending_tx_info[pending_idx]; + + vif = pending_tx_info->vif; + + make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY); + + index = pending_index(netbk->pending_prod++); + netbk->pending_ring[index] = pending_idx; + + xenvif_put(vif); + + netbk->mmap_pages[pending_idx]->mapping = 0; + put_page(netbk->mmap_pages[pending_idx]); + netbk->mmap_pages[pending_idx] = NULL; +} + +static void make_tx_response(struct xenvif *vif, + struct xen_netif_tx_request *txp, + s8 st) +{ + RING_IDX i = vif->tx.rsp_prod_pvt; + struct xen_netif_tx_response *resp; + int notify; + + resp = RING_GET_RESPONSE(&vif->tx, i); + resp->id = txp->id; + resp->status = st; + + if (txp->flags & XEN_NETTXF_extra_info) + RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL; + + vif->tx.rsp_prod_pvt = ++i; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); + if (notify) + notify_remote_via_irq(vif->irq); +} + +static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, + u16 id, + s8 st, + u16 offset, + u16 size, + u16 flags) +{ + RING_IDX i = vif->rx.rsp_prod_pvt; + struct xen_netif_rx_response *resp; + + resp = RING_GET_RESPONSE(&vif->rx, i); + resp->offset = offset; + resp->flags = flags; + resp->id = id; + resp->status = (s16)size; + if (st < 0) + resp->status = (s16)st; + + vif->rx.rsp_prod_pvt = ++i; + + return resp; +} + +static inline int rx_work_todo(struct xen_netbk *netbk) +{ + return !skb_queue_empty(&netbk->rx_queue); +} + +static inline int tx_work_todo(struct xen_netbk *netbk) +{ + + if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) && + !list_empty(&netbk->net_schedule_list)) + return 1; + + return 0; +} + +static int xen_netbk_kthread(void *data) +{ + struct xen_netbk *netbk = data; + while (!kthread_should_stop()) { + wait_event_interruptible(netbk->wq, + rx_work_todo(netbk) || + tx_work_todo(netbk) || + kthread_should_stop()); + cond_resched(); + + if (kthread_should_stop()) + break; + + if (rx_work_todo(netbk)) + xen_netbk_rx_action(netbk); + + if (tx_work_todo(netbk)) + xen_netbk_tx_action(netbk); + } + + return 0; +} + +void xen_netbk_unmap_frontend_rings(struct xenvif *vif) +{ + struct gnttab_unmap_grant_ref op; + + if (vif->tx.sring) { + gnttab_set_unmap_op(&op, (unsigned long)vif->tx_comms_area->addr, + GNTMAP_host_map, vif->tx_shmem_handle); + + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) + BUG(); + } + + if (vif->rx.sring) { + gnttab_set_unmap_op(&op, (unsigned long)vif->rx_comms_area->addr, + GNTMAP_host_map, vif->rx_shmem_handle); + + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) + BUG(); + } + if (vif->rx_comms_area) + free_vm_area(vif->rx_comms_area); + if (vif->tx_comms_area) + free_vm_area(vif->tx_comms_area); +} + +int xen_netbk_map_frontend_rings(struct xenvif *vif, + grant_ref_t tx_ring_ref, + grant_ref_t rx_ring_ref) +{ + struct gnttab_map_grant_ref op; + struct xen_netif_tx_sring *txs; + struct xen_netif_rx_sring *rxs; + + int err = -ENOMEM; + + vif->tx_comms_area = alloc_vm_area(PAGE_SIZE); + if (vif->tx_comms_area == NULL) + goto err; + + vif->rx_comms_area = alloc_vm_area(PAGE_SIZE); + if (vif->rx_comms_area == NULL) + goto err; + + gnttab_set_map_op(&op, (unsigned long)vif->tx_comms_area->addr, + GNTMAP_host_map, tx_ring_ref, vif->domid); + + if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) + BUG(); + + if (op.status) { + netdev_warn(vif->dev, + "failed to map tx ring. 
err=%d status=%d\n", + err, op.status); + err = op.status; + goto err; + } + + vif->tx_shmem_ref = tx_ring_ref; + vif->tx_shmem_handle = op.handle; + + txs = (struct xen_netif_tx_sring *)vif->tx_comms_area->addr; + BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE); + + gnttab_set_map_op(&op, (unsigned long)vif->rx_comms_area->addr, + GNTMAP_host_map, rx_ring_ref, vif->domid); + + if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) + BUG(); + + if (op.status) { + netdev_warn(vif->dev, + "failed to map rx ring. err=%d status=%d\n", + err, op.status); + err = op.status; + goto err; + } + + vif->rx_shmem_ref = rx_ring_ref; + vif->rx_shmem_handle = op.handle; + vif->rx_req_cons_peek = 0; + + rxs = (struct xen_netif_rx_sring *)vif->rx_comms_area->addr; + BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE); + + return 0; + +err: + xen_netbk_unmap_frontend_rings(vif); + return err; +} + +static int __init netback_init(void) +{ + int i; + int rc = 0; + int group; + + if (!xen_pv_domain()) + return -ENODEV; + + xen_netbk_group_nr = num_online_cpus(); + xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr); + if (!xen_netbk) { + printk(KERN_ALERT "%s: out of memory\n", __func__); + return -ENOMEM; + } + + for (group = 0; group < xen_netbk_group_nr; group++) { + struct xen_netbk *netbk = &xen_netbk[group]; + skb_queue_head_init(&netbk->rx_queue); + skb_queue_head_init(&netbk->tx_queue); + + init_timer(&netbk->net_timer); + netbk->net_timer.data = (unsigned long)netbk; + netbk->net_timer.function = xen_netbk_alarm; + + netbk->pending_cons = 0; + netbk->pending_prod = MAX_PENDING_REQS; + for (i = 0; i < MAX_PENDING_REQS; i++) + netbk->pending_ring[i] = i; + + init_waitqueue_head(&netbk->wq); + netbk->task = kthread_create(xen_netbk_kthread, + (void *)netbk, + "netback/%u", group); + + if (IS_ERR(netbk->task)) { + printk(KERN_ALERT "kthread_run() fails at netback\n"); + del_timer(&netbk->net_timer); + rc = PTR_ERR(netbk->task); + goto failed_init; + } + + kthread_bind(netbk->task, group); + + INIT_LIST_HEAD(&netbk->net_schedule_list); + + spin_lock_init(&netbk->net_schedule_list_lock); + + atomic_set(&netbk->netfront_count, 0); + + wake_up_process(netbk->task); + } + + rc = xenvif_xenbus_init(); + if (rc) + goto failed_init; + + return 0; + +failed_init: + while (--group >= 0) { + struct xen_netbk *netbk = &xen_netbk[group]; + for (i = 0; i < MAX_PENDING_REQS; i++) { + if (netbk->mmap_pages[i]) + __free_page(netbk->mmap_pages[i]); + } + del_timer(&netbk->net_timer); + kthread_stop(netbk->task); + } + vfree(xen_netbk); + return rc; + +} + +module_init(netback_init); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c new file mode 100644 index 000000000000..22b8c3505991 --- /dev/null +++ b/drivers/net/xen-netback/xenbus.c @@ -0,0 +1,490 @@ +/* + * Xenbus code for netif backend + * + * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au> + * Copyright (C) 2005 XenSource Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#include "common.h" + +struct backend_info { + struct xenbus_device *dev; + struct xenvif *vif; + enum xenbus_state frontend_state; + struct xenbus_watch hotplug_status_watch; + int have_hotplug_status_watch:1; +}; + +static int connect_rings(struct backend_info *); +static void connect(struct backend_info *); +static void backend_create_xenvif(struct backend_info *be); +static void unregister_hotplug_status_watch(struct backend_info *be); + +static int netback_remove(struct xenbus_device *dev) +{ + struct backend_info *be = dev_get_drvdata(&dev->dev); + + unregister_hotplug_status_watch(be); + if (be->vif) { + kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); + xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); + xenvif_disconnect(be->vif); + be->vif = NULL; + } + kfree(be); + dev_set_drvdata(&dev->dev, NULL); + return 0; +} + + +/** + * Entry point to this code when a new device is created. Allocate the basic + * structures and switch to InitWait. + */ +static int netback_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + const char *message; + struct xenbus_transaction xbt; + int err; + int sg; + struct backend_info *be = kzalloc(sizeof(struct backend_info), + GFP_KERNEL); + if (!be) { + xenbus_dev_fatal(dev, -ENOMEM, + "allocating backend structure"); + return -ENOMEM; + } + + be->dev = dev; + dev_set_drvdata(&dev->dev, be); + + sg = 1; + + do { + err = xenbus_transaction_start(&xbt); + if (err) { + xenbus_dev_fatal(dev, err, "starting transaction"); + goto fail; + } + + err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg); + if (err) { + message = "writing feature-sg"; + goto abort_transaction; + } + + err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", + "%d", sg); + if (err) { + message = "writing feature-gso-tcpv4"; + goto abort_transaction; + } + + /* We support rx-copy path. */ + err = xenbus_printf(xbt, dev->nodename, + "feature-rx-copy", "%d", 1); + if (err) { + message = "writing feature-rx-copy"; + goto abort_transaction; + } + + /* + * We don't support rx-flip path (except old guests who don't + * grok this feature flag). + */ + err = xenbus_printf(xbt, dev->nodename, + "feature-rx-flip", "%d", 0); + if (err) { + message = "writing feature-rx-flip"; + goto abort_transaction; + } + + err = xenbus_transaction_end(xbt, 0); + } while (err == -EAGAIN); + + if (err) { + xenbus_dev_fatal(dev, err, "completing transaction"); + goto fail; + } + + err = xenbus_switch_state(dev, XenbusStateInitWait); + if (err) + goto fail; + + /* This kicks hotplug scripts, so do it immediately. */ + backend_create_xenvif(be); + + return 0; + +abort_transaction: + xenbus_transaction_end(xbt, 1); + xenbus_dev_fatal(dev, err, "%s", message); +fail: + pr_debug("failed"); + netback_remove(dev); + return err; +} + + +/* + * Handle the creation of the hotplug script environment. We add the script + * and vif variables to the environment, for the benefit of the vif-* hotplug + * scripts. 
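+ * The script path is read from xenstore and exported together with the vif name.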
+ */ +static int netback_uevent(struct xenbus_device *xdev, + struct kobj_uevent_env *env) +{ + struct backend_info *be = dev_get_drvdata(&xdev->dev); + char *val; + + val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL); + if (IS_ERR(val)) { + int err = PTR_ERR(val); + xenbus_dev_fatal(xdev, err, "reading script"); + return err; + } else { + if (add_uevent_var(env, "script=%s", val)) { + kfree(val); + return -ENOMEM; + } + kfree(val); + } + + if (!be || !be->vif) + return 0; + + return add_uevent_var(env, "vif=%s", be->vif->dev->name); +} + + +static void backend_create_xenvif(struct backend_info *be) +{ + int err; + long handle; + struct xenbus_device *dev = be->dev; + + if (be->vif != NULL) + return; + + err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); + if (err != 1) { + xenbus_dev_fatal(dev, err, "reading handle"); + return; + } + + be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); + if (IS_ERR(be->vif)) { + err = PTR_ERR(be->vif); + be->vif = NULL; + xenbus_dev_fatal(dev, err, "creating interface"); + return; + } + + kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); +} + + +static void disconnect_backend(struct xenbus_device *dev) +{ + struct backend_info *be = dev_get_drvdata(&dev->dev); + + if (be->vif) { + xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); + xenvif_disconnect(be->vif); + be->vif = NULL; + } +} + +/** + * Callback received when the frontend's state changes. + */ +static void frontend_changed(struct xenbus_device *dev, + enum xenbus_state frontend_state) +{ + struct backend_info *be = dev_get_drvdata(&dev->dev); + + pr_debug("frontend state %s", xenbus_strstate(frontend_state)); + + be->frontend_state = frontend_state; + + switch (frontend_state) { + case XenbusStateInitialising: + if (dev->state == XenbusStateClosed) { + printk(KERN_INFO "%s: %s: prepare for reconnect\n", + __func__, dev->nodename); + xenbus_switch_state(dev, XenbusStateInitWait); + } + break; + + case XenbusStateInitialised: + break; + + case XenbusStateConnected: + if (dev->state == XenbusStateConnected) + break; + backend_create_xenvif(be); + if (be->vif) + connect(be); + break; + + case XenbusStateClosing: + if (be->vif) + kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); + disconnect_backend(dev); + xenbus_switch_state(dev, XenbusStateClosing); + break; + + case XenbusStateClosed: + xenbus_switch_state(dev, XenbusStateClosed); + if (xenbus_dev_is_online(dev)) + break; + /* fall through if not online */ + case XenbusStateUnknown: + device_unregister(&dev->dev); + break; + + default: + xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", + frontend_state); + break; + } +} + + +static void xen_net_read_rate(struct xenbus_device *dev, + unsigned long *bytes, unsigned long *usec) +{ + char *s, *e; + unsigned long b, u; + char *ratestr; + + /* Default to unlimited bandwidth. */ + *bytes = ~0UL; + *usec = 0; + + ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL); + if (IS_ERR(ratestr)) + return; + + s = ratestr; + b = simple_strtoul(s, &e, 10); + if ((s == e) || (*e != ',')) + goto fail; + + s = e + 1; + u = simple_strtoul(s, &e, 10); + if ((s == e) || (*e != '\0')) + goto fail; + + *bytes = b; + *usec = u; + + kfree(ratestr); + return; + + fail: + pr_warn("Failed to parse network rate limit. 
Traffic unlimited.\n"); + kfree(ratestr); +} + +static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) +{ + char *s, *e, *macstr; + int i; + + macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); + if (IS_ERR(macstr)) + return PTR_ERR(macstr); + + for (i = 0; i < ETH_ALEN; i++) { + mac[i] = simple_strtoul(s, &e, 16); + if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { + kfree(macstr); + return -ENOENT; + } + s = e+1; + } + + kfree(macstr); + return 0; +} + +static void unregister_hotplug_status_watch(struct backend_info *be) +{ + if (be->have_hotplug_status_watch) { + unregister_xenbus_watch(&be->hotplug_status_watch); + kfree(be->hotplug_status_watch.node); + } + be->have_hotplug_status_watch = 0; +} + +static void hotplug_status_changed(struct xenbus_watch *watch, + const char **vec, + unsigned int vec_size) +{ + struct backend_info *be = container_of(watch, + struct backend_info, + hotplug_status_watch); + char *str; + unsigned int len; + + str = xenbus_read(XBT_NIL, be->dev->nodename, "hotplug-status", &len); + if (IS_ERR(str)) + return; + if (len == sizeof("connected")-1 && !memcmp(str, "connected", len)) { + xenbus_switch_state(be->dev, XenbusStateConnected); + /* Not interested in this watch anymore. */ + unregister_hotplug_status_watch(be); + } + kfree(str); +} + +static void connect(struct backend_info *be) +{ + int err; + struct xenbus_device *dev = be->dev; + + err = connect_rings(be); + if (err) + return; + + err = xen_net_read_mac(dev, be->vif->fe_dev_addr); + if (err) { + xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); + return; + } + + xen_net_read_rate(dev, &be->vif->credit_bytes, + &be->vif->credit_usec); + be->vif->remaining_credit = be->vif->credit_bytes; + + unregister_hotplug_status_watch(be); + err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, + hotplug_status_changed, + "%s/%s", dev->nodename, "hotplug-status"); + if (err) { + /* Switch now, since we can't do a watch. */ + xenbus_switch_state(dev, XenbusStateConnected); + } else { + be->have_hotplug_status_watch = 1; + } + + netif_wake_queue(be->vif->dev); +} + + +static int connect_rings(struct backend_info *be) +{ + struct xenvif *vif = be->vif; + struct xenbus_device *dev = be->dev; + unsigned long tx_ring_ref, rx_ring_ref; + unsigned int evtchn, rx_copy; + int err; + int val; + + err = xenbus_gather(XBT_NIL, dev->otherend, + "tx-ring-ref", "%lu", &tx_ring_ref, + "rx-ring-ref", "%lu", &rx_ring_ref, + "event-channel", "%u", &evtchn, NULL); + if (err) { + xenbus_dev_fatal(dev, err, + "reading %s/ring-ref and event-channel", + dev->otherend); + return err; + } + + err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", + &rx_copy); + if (err == -ENOENT) { + err = 0; + rx_copy = 0; + } + if (err < 0) { + xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy", + dev->otherend); + return err; + } + if (!rx_copy) + return -EOPNOTSUPP; + + if (vif->dev->tx_queue_len != 0) { + if (xenbus_scanf(XBT_NIL, dev->otherend, + "feature-rx-notify", "%d", &val) < 0) + val = 0; + if (val) + vif->can_queue = 1; + else + /* Must be non-zero for pfifo_fast to work. 
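The default pfifo_fast qdisc takes its queue limit from tx_queue_len, so a value of zero would leave no room to queue packets.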
*/ + vif->dev->tx_queue_len = 1; + } + + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", + "%d", &val) < 0) + val = 0; + vif->can_sg = !!val; + + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", + "%d", &val) < 0) + val = 0; + vif->gso = !!val; + + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix", + "%d", &val) < 0) + val = 0; + vif->gso_prefix = !!val; + + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", + "%d", &val) < 0) + val = 0; + vif->csum = !val; + + /* Map the shared frame, irq etc. */ + err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref, evtchn); + if (err) { + xenbus_dev_fatal(dev, err, + "mapping shared-frames %lu/%lu port %u", + tx_ring_ref, rx_ring_ref, evtchn); + return err; + } + return 0; +} + + +/* ** Driver Registration ** */ + + +static const struct xenbus_device_id netback_ids[] = { + { "vif" }, + { "" } +}; + + +static struct xenbus_driver netback = { + .name = "vif", + .owner = THIS_MODULE, + .ids = netback_ids, + .probe = netback_probe, + .remove = netback_remove, + .uevent = netback_uevent, + .otherend_changed = frontend_changed, +}; + +int xenvif_xenbus_init(void) +{ + return xenbus_register_backend(&netback); +} diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index da1f12120346..5c8d9c385be0 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -122,7 +122,7 @@ struct netfront_info { struct mmu_update rx_mmu[NET_RX_RING_SIZE]; /* Statistics */ - int rx_gso_checksum_fixup; + unsigned long rx_gso_checksum_fixup; }; struct netfront_rx_info { @@ -359,7 +359,7 @@ static void xennet_tx_buf_gc(struct net_device *dev) struct xen_netif_tx_response *txrsp; txrsp = RING_GET_RESPONSE(&np->tx, cons); - if (txrsp->status == NETIF_RSP_NULL) + if (txrsp->status == XEN_NETIF_RSP_NULL) continue; id = txrsp->id; @@ -416,7 +416,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, larger than a page), split it it into page-sized chunks. */ while (len > PAGE_SIZE - offset) { tx->size = PAGE_SIZE - offset; - tx->flags |= NETTXF_more_data; + tx->flags |= XEN_NETTXF_more_data; len -= tx->size; data += tx->size; offset = 0; @@ -442,7 +442,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; - tx->flags |= NETTXF_more_data; + tx->flags |= XEN_NETTXF_more_data; id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); np->tx_skbs[id].skb = skb_get(skb); @@ -517,10 +517,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) tx->flags = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */ - tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; + tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; else if (skb->ip_summed == CHECKSUM_UNNECESSARY) /* remote but checksummed. 
*/ - tx->flags |= NETTXF_data_validated; + tx->flags |= XEN_NETTXF_data_validated; if (skb_shinfo(skb)->gso_size) { struct xen_netif_extra_info *gso; @@ -531,7 +531,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (extra) extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; else - tx->flags |= NETTXF_extra_info; + tx->flags |= XEN_NETTXF_extra_info; gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; @@ -651,7 +651,7 @@ static int xennet_get_responses(struct netfront_info *np, int err = 0; unsigned long ret; - if (rx->flags & NETRXF_extra_info) { + if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(np, extras, rp); cons = np->rx.rsp_cons; } @@ -688,7 +688,7 @@ static int xennet_get_responses(struct netfront_info *np, __skb_queue_tail(list, skb); next: - if (!(rx->flags & NETRXF_more_data)) + if (!(rx->flags & XEN_NETRXF_more_data)) break; if (cons + frags == rp) { @@ -983,9 +983,9 @@ err: skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->len += skb->data_len; - if (rx->flags & NETRXF_csum_blank) + if (rx->flags & XEN_NETRXF_csum_blank) skb->ip_summed = CHECKSUM_PARTIAL; - else if (rx->flags & NETRXF_data_validated) + else if (rx->flags & XEN_NETRXF_data_validated) skb->ip_summed = CHECKSUM_UNNECESSARY; __skb_queue_tail(&rxq, skb); @@ -1692,7 +1692,7 @@ static void xennet_get_ethtool_stats(struct net_device *dev, int i; for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) - data[i] = *(int *)(np + xennet_stats[i].offset); + data[i] = *(unsigned long *)(np + xennet_stats[i].offset); } static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index f47a714538db..af3f7b095647 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -225,7 +225,8 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, /*****************************************************************************/ #define QETH_MAX_QUEUES 4 #define QETH_IN_BUF_SIZE_DEFAULT 65536 -#define QETH_IN_BUF_COUNT_DEFAULT 16 +#define QETH_IN_BUF_COUNT_DEFAULT 64 +#define QETH_IN_BUF_COUNT_HSDEFAULT 128 #define QETH_IN_BUF_COUNT_MIN 8 #define QETH_IN_BUF_COUNT_MAX 128 #define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) @@ -741,7 +742,6 @@ struct qeth_card { /* QDIO buffer handling */ struct qeth_qdio_info qdio; struct qeth_perf_stats perf_stats; - int use_hard_stop; int read_or_write_problem; struct qeth_osn_info osn_info; struct qeth_discipline discipline; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 019ae58ab913..25eef304bd47 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -302,12 +302,15 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, int com = cmd->hdr.command; ipa_name = qeth_get_ipa_cmd_name(com); if (rc) - QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s returned x%X \"%s\"\n", - ipa_name, com, QETH_CARD_IFNAME(card), - rc, qeth_get_ipa_msg(rc)); + QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned " + "x%X \"%s\"\n", + ipa_name, com, dev_name(&card->gdev->dev), + QETH_CARD_IFNAME(card), rc, + qeth_get_ipa_msg(rc)); else - QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s succeeded\n", - ipa_name, com, QETH_CARD_IFNAME(card)); + QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n", + ipa_name, com, dev_name(&card->gdev->dev), + QETH_CARD_IFNAME(card)); } static struct qeth_ipa_cmd *qeth_check_ipa_data(struct 
qeth_card *card, @@ -1023,7 +1026,10 @@ static void qeth_init_qdio_info(struct qeth_card *card) atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); /* inbound */ card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; - card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; + if (card->info.type == QETH_CARD_TYPE_IQD) + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; + else + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); @@ -1083,7 +1089,6 @@ static int qeth_setup_card(struct qeth_card *card) card->data.state = CH_STATE_DOWN; card->state = CARD_STATE_DOWN; card->lan_online = 0; - card->use_hard_stop = 0; card->read_or_write_problem = 0; card->dev = NULL; spin_lock_init(&card->vlanlock); @@ -1732,20 +1737,22 @@ int qeth_send_control_data(struct qeth_card *card, int len, }; } + if (reply->rc == -EIO) + goto error; rc = reply->rc; qeth_put_reply(reply); return rc; time_err: + reply->rc = -ETIME; spin_lock_irqsave(&reply->card->lock, flags); list_del_init(&reply->list); spin_unlock_irqrestore(&reply->card->lock, flags); - reply->rc = -ETIME; atomic_inc(&reply->received); +error: atomic_set(&card->write.irq_pending, 0); qeth_release_buffer(iob->channel, iob); card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; - wake_up(&reply->wait_q); rc = reply->rc; qeth_put_reply(reply); return rc; @@ -2490,45 +2497,19 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, } EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); -static int qeth_send_startstoplan(struct qeth_card *card, - enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot) -{ - int rc; - struct qeth_cmd_buffer *iob; - - iob = qeth_get_ipacmd_buffer(card, ipacmd, prot); - rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); - - return rc; -} - int qeth_send_startlan(struct qeth_card *card) { int rc; + struct qeth_cmd_buffer *iob; QETH_DBF_TEXT(SETUP, 2, "strtlan"); - rc = qeth_send_startstoplan(card, IPA_CMD_STARTLAN, 0); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); return rc; } EXPORT_SYMBOL_GPL(qeth_send_startlan); -int qeth_send_stoplan(struct qeth_card *card) -{ - int rc = 0; - - /* - * TODO: according to the IPA format document page 14, - * TCP/IP (we!) never issue a STOPLAN - * is this right ?!? 
- */ - QETH_DBF_TEXT(SETUP, 2, "stoplan"); - - rc = qeth_send_startstoplan(card, IPA_CMD_STOPLAN, 0); - return rc; -} -EXPORT_SYMBOL_GPL(qeth_send_stoplan); - int qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index ada0fe782373..6fbaacb21943 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -202,17 +202,19 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) kfree(mc); } -static void qeth_l2_del_all_mc(struct qeth_card *card) +static void qeth_l2_del_all_mc(struct qeth_card *card, int del) { struct qeth_mc_mac *mc, *tmp; spin_lock_bh(&card->mclock); list_for_each_entry_safe(mc, tmp, &card->mc_list, list) { - if (mc->is_vmac) - qeth_l2_send_setdelmac(card, mc->mc_addr, + if (del) { + if (mc->is_vmac) + qeth_l2_send_setdelmac(card, mc->mc_addr, IPA_CMD_DELVMAC, NULL); - else - qeth_l2_send_delgroupmac(card, mc->mc_addr); + else + qeth_l2_send_delgroupmac(card, mc->mc_addr); + } list_del(&mc->list); kfree(mc); } @@ -288,18 +290,13 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, qeth_l2_send_setdelvlan_cb, NULL); } -static void qeth_l2_process_vlans(struct qeth_card *card, int clear) +static void qeth_l2_process_vlans(struct qeth_card *card) { struct qeth_vlan_vid *id; QETH_CARD_TEXT(card, 3, "L2prcvln"); spin_lock_bh(&card->vlanlock); list_for_each_entry(id, &card->vid_list, list) { - if (clear) - qeth_l2_send_setdelvlan(card, id->vid, - IPA_CMD_DELVLAN); - else - qeth_l2_send_setdelvlan(card, id->vid, - IPA_CMD_SETVLAN); + qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN); } spin_unlock_bh(&card->vlanlock); } @@ -379,19 +376,11 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) dev_close(card->dev); rtnl_unlock(); } - if (!card->use_hard_stop || - recovery_mode) { - __u8 *mac = &card->dev->dev_addr[0]; - rc = qeth_l2_send_delmac(card, mac); - QETH_DBF_TEXT_(SETUP, 2, "Lerr%d", rc); - } + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; card->state = CARD_STATE_SOFTSETUP; } if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l2_process_vlans(card, 1); - if (!card->use_hard_stop || - recovery_mode) - qeth_l2_del_all_mc(card); + qeth_l2_del_all_mc(card, 0); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } @@ -405,7 +394,6 @@ static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } - card->use_hard_stop = 0; return rc; } @@ -705,7 +693,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) if (qeth_threads_running(card, QETH_RECOVER_THREAD) && (card->state != CARD_STATE_UP)) return; - qeth_l2_del_all_mc(card); + qeth_l2_del_all_mc(card, 1); spin_lock_bh(&card->mclock); netdev_for_each_mc_addr(ha, dev) qeth_l2_add_mc(card, ha->addr, 0); @@ -907,10 +895,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); - if (cgdev->state == CCWGROUP_ONLINE) { - card->use_hard_stop = 1; + if (cgdev->state == CCWGROUP_ONLINE) qeth_l2_set_offline(cgdev); - } if (card->dev) { unregister_netdev(card->dev); @@ -1040,7 +1026,7 @@ contin: if (card->info.type != QETH_CARD_TYPE_OSN && card->info.type != QETH_CARD_TYPE_OSM) - qeth_l2_process_vlans(card, 0); + qeth_l2_process_vlans(card); netif_tx_disable(card->dev); @@ -1076,7 
+1062,6 @@ contin: return 0; out_remove: - card->use_hard_stop = 1; qeth_l2_stop_card(card, 0); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); @@ -1144,7 +1129,6 @@ static int qeth_l2_recover(void *ptr) QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); - card->use_hard_stop = 1; __qeth_l2_set_offline(card->gdev, 1); rc = __qeth_l2_set_online(card->gdev, 1); if (!rc) @@ -1191,7 +1175,6 @@ static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) if (gdev->state == CCWGROUP_OFFLINE) return 0; if (card->state == CARD_STATE_UP) { - card->use_hard_stop = 1; __qeth_l2_set_offline(card->gdev, 1); } else __qeth_l2_set_offline(card->gdev, 0); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d09b0c44fc3d..142e5f6ef4f3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -510,8 +510,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card) kfree(tbd_list); } -static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, - int recover) +static void qeth_l3_clear_ip_list(struct qeth_card *card, int recover) { struct qeth_ipaddr *addr, *tmp; unsigned long flags; @@ -530,11 +529,6 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean, addr = list_entry(card->ip_list.next, struct qeth_ipaddr, entry); list_del_init(&addr->entry); - if (clean) { - spin_unlock_irqrestore(&card->ip_lock, flags); - qeth_l3_deregister_addr_entry(card, addr); - spin_lock_irqsave(&card->ip_lock, flags); - } if (!recover || addr->is_multicast) { kfree(addr); continue; @@ -1611,29 +1605,6 @@ static int qeth_l3_start_ipassists(struct qeth_card *card) return 0; } -static int qeth_l3_put_unique_id(struct qeth_card *card) -{ - - int rc = 0; - struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 2, "puniqeid"); - - if ((card->info.unique_id & UNIQUE_ID_NOT_BY_CARD) == - UNIQUE_ID_NOT_BY_CARD) - return -1; - iob = qeth_get_ipacmd_buffer(card, IPA_CMD_DESTROY_ADDR, - QETH_PROT_IPV6); - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = - card->info.unique_id; - memcpy(&cmd->data.create_destroy_addr.unique_id[0], - card->dev->dev_addr, OSA_ADDR_LEN); - rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); - return rc; -} - static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -2324,25 +2295,14 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) dev_close(card->dev); rtnl_unlock(); } - if (!card->use_hard_stop) { - rc = qeth_send_stoplan(card); - if (rc) - QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); - } card->state = CARD_STATE_SOFTSETUP; } if (card->state == CARD_STATE_SOFTSETUP) { - qeth_l3_clear_ip_list(card, !card->use_hard_stop, 1); + qeth_l3_clear_ip_list(card, 1); qeth_clear_ipacmd_list(card); card->state = CARD_STATE_HARDSETUP; } if (card->state == CARD_STATE_HARDSETUP) { - if (!card->use_hard_stop && - (card->info.type != QETH_CARD_TYPE_IQD)) { - rc = qeth_l3_put_unique_id(card); - if (rc) - QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); - } qeth_qdio_clear_card(card, 0); qeth_clear_qdio_buffers(card); qeth_clear_working_pool_list(card); @@ -2352,7 +2312,6 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) qeth_clear_cmd_buffers(&card->read); qeth_clear_cmd_buffers(&card->write); } - card->use_hard_stop = 0; return rc; } @@ -3433,6 
+3392,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) card->dev->dev_id = card->info.unique_id & 0xffff; + if (!card->info.guestlan) + card->dev->features |= NETIF_F_GRO; } } else if (card->info.type == QETH_CARD_TYPE_IQD) { card->dev = alloc_netdev(0, "hsi%d", ether_setup); @@ -3471,6 +3432,9 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) card->discipline.output_handler = (qdio_handler_t *) qeth_qdio_output_handler; card->discipline.recover = qeth_l3_recover; + if ((card->info.type == QETH_CARD_TYPE_OSD) || + (card->info.type == QETH_CARD_TYPE_OSX)) + card->options.checksum_type = HW_CHECKSUMMING; return 0; } @@ -3483,17 +3447,15 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) qeth_set_allowed_threads(card, 0, 1); wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); - if (cgdev->state == CCWGROUP_ONLINE) { - card->use_hard_stop = 1; + if (cgdev->state == CCWGROUP_ONLINE) qeth_l3_set_offline(cgdev); - } if (card->dev) { unregister_netdev(card->dev); card->dev = NULL; } - qeth_l3_clear_ip_list(card, 0, 0); + qeth_l3_clear_ip_list(card, 0); qeth_l3_clear_ipato_list(card); return; } @@ -3594,7 +3556,6 @@ contin: mutex_unlock(&card->discipline_mutex); return 0; out_remove: - card->use_hard_stop = 1; qeth_l3_stop_card(card, 0); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); @@ -3663,7 +3624,6 @@ static int qeth_l3_recover(void *ptr) QETH_CARD_TEXT(card, 2, "recover2"); dev_warn(&card->gdev->dev, "A recovery process has been started for the device\n"); - card->use_hard_stop = 1; __qeth_l3_set_offline(card->gdev, 1); rc = __qeth_l3_set_online(card->gdev, 1); if (!rc) @@ -3684,7 +3644,6 @@ static int qeth_l3_recover(void *ptr) static void qeth_l3_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - qeth_l3_clear_ip_list(card, 0, 0); qeth_qdio_clear_card(card, 0); qeth_clear_qdio_buffers(card); } @@ -3700,7 +3659,6 @@ static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) if (gdev->state == CCWGROUP_OFFLINE) return 0; if (card->state == CARD_STATE_UP) { - card->use_hard_stop = 1; __qeth_l3_set_offline(card->gdev, 1); } else __qeth_l3_set_offline(card->gdev, 0); diff --git a/drivers/scsi/cxgbi/cxgb3i/Kconfig b/drivers/scsi/cxgbi/cxgb3i/Kconfig index 5cf4e9831f1b..11dff23f7838 100644 --- a/drivers/scsi/cxgbi/cxgb3i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb3i/Kconfig @@ -1,6 +1,8 @@ config SCSI_CXGB3_ISCSI tristate "Chelsio T3 iSCSI support" - depends on CHELSIO_T3_DEPENDS + depends on PCI && INET + select NETDEVICES + select NETDEV_10000 select CHELSIO_T3 select SCSI_ISCSI_ATTRS ---help--- diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig index bb94b39b17b3..d5302c27f377 100644 --- a/drivers/scsi/cxgbi/cxgb4i/Kconfig +++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig @@ -1,6 +1,8 @@ config SCSI_CXGB4_ISCSI tristate "Chelsio T4 iSCSI support" - depends on CHELSIO_T4_DEPENDS + depends on PCI && INET + select NETDEVICES + select NETDEV_10000 select CHELSIO_T4 select SCSI_ISCSI_ATTRS ---help--- diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d2ad3d676724..a24dff9f9163 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -451,26 +451,13 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) } static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr, - __be16 sport, __be16 dport, u8 tos) + __be16 
sport, __be16 dport, u8 tos) { struct rtable *rt; - struct flowi fl = { - .oif = 0, - .nl_u = { - .ip4_u = { - .daddr = daddr, - .saddr = saddr, - .tos = tos } - }, - .proto = IPPROTO_TCP, - .uli_u = { - .ports = { - .sport = sport, - .dport = dport } - } - }; - if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) + rt = ip_route_output_ports(&init_net, NULL, daddr, saddr, + dport, sport, IPPROTO_TCP, tos, 0); + if (IS_ERR(rt)) return NULL; return rt; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 9f9600b67001..3becc6a20a4f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -285,9 +285,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, } /* Do not support for bonding device */ - if ((netdev->priv_flags & IFF_MASTER_ALB) || - (netdev->priv_flags & IFF_SLAVE_INACTIVE) || - (netdev->priv_flags & IFF_MASTER_8023AD)) { + if (netdev->priv_flags & IFF_BONDING && netdev->flags & IFF_MASTER) { FCOE_NETDEV_DBG(netdev, "Bonded interfaces not supported\n"); return -EOPNOTSUPP; } diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 3918d2cc5856..e05ba6eefc7e 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -1192,10 +1192,10 @@ void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags) } EXPORT_SYMBOL(ssb_device_enable); -/* Wait for a bit in a register to get set or unset. +/* Wait for bitmask in a register to get set or cleared. * timeout is in units of ten-microseconds */ -static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask, - int timeout, int set) +static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask, + int timeout, int set) { int i; u32 val; @@ -1203,7 +1203,7 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask, for (i = 0; i < timeout; i++) { val = ssb_read32(dev, reg); if (set) { - if (val & bitmask) + if ((val & bitmask) == bitmask) return 0; } else { if (!(val & bitmask)) @@ -1220,20 +1220,38 @@ static int ssb_wait_bit(struct ssb_device *dev, u16 reg, u32 bitmask, void ssb_device_disable(struct ssb_device *dev, u32 core_specific_flags) { - u32 reject; + u32 reject, val; if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_RESET) return; reject = ssb_tmslow_reject_bitmask(dev); - ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK); - ssb_wait_bit(dev, SSB_TMSLOW, reject, 1000, 1); - ssb_wait_bit(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0); - ssb_write32(dev, SSB_TMSLOW, - SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | - reject | SSB_TMSLOW_RESET | - core_specific_flags); - ssb_flush_tmslow(dev); + + if (ssb_read32(dev, SSB_TMSLOW) & SSB_TMSLOW_CLOCK) { + ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_CLOCK); + ssb_wait_bits(dev, SSB_TMSLOW, reject, 1000, 1); + ssb_wait_bits(dev, SSB_TMSHIGH, SSB_TMSHIGH_BUSY, 1000, 0); + + if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) { + val = ssb_read32(dev, SSB_IMSTATE); + val |= SSB_IMSTATE_REJECT; + ssb_write32(dev, SSB_IMSTATE, val); + ssb_wait_bits(dev, SSB_IMSTATE, SSB_IMSTATE_BUSY, 1000, + 0); + } + + ssb_write32(dev, SSB_TMSLOW, + SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | + reject | SSB_TMSLOW_RESET | + core_specific_flags); + ssb_flush_tmslow(dev); + + if (ssb_read32(dev, SSB_IDLOW) & SSB_IDLOW_INITIATOR) { + val = ssb_read32(dev, SSB_IMSTATE); + val &= ~SSB_IMSTATE_REJECT; + ssb_write32(dev, SSB_IMSTATE, val); + } + } ssb_write32(dev, SSB_TMSLOW, reject | SSB_TMSLOW_RESET | diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 158449e55044..a467b20baac8 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ 
-468,10 +468,14 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in) SPEX(country_code, SSB_SPROM4_CCODE, 0xFFFF, 0); SPEX(boardflags_lo, SSB_SPROM4_BFLLO, 0xFFFF, 0); SPEX(boardflags_hi, SSB_SPROM4_BFLHI, 0xFFFF, 0); + SPEX(boardflags2_lo, SSB_SPROM4_BFL2LO, 0xFFFF, 0); + SPEX(boardflags2_hi, SSB_SPROM4_BFL2HI, 0xFFFF, 0); } else { SPEX(country_code, SSB_SPROM5_CCODE, 0xFFFF, 0); SPEX(boardflags_lo, SSB_SPROM5_BFLLO, 0xFFFF, 0); SPEX(boardflags_hi, SSB_SPROM5_BFLHI, 0xFFFF, 0); + SPEX(boardflags2_lo, SSB_SPROM5_BFL2LO, 0xFFFF, 0); + SPEX(boardflags2_hi, SSB_SPROM5_BFL2HI, 0xFFFF, 0); } SPEX(ant_available_a, SSB_SPROM4_ANTAVAIL, SSB_SPROM4_ANTAVAIL_A, SSB_SPROM4_ANTAVAIL_A_SHIFT); @@ -641,7 +645,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, break; default: ssb_printk(KERN_WARNING PFX "Unsupported SPROM" - " revision %d detected. Will extract" + " revision %d detected. Will extract" " v1\n", out->revision); out->revision = 1; sprom_extract_r123(out, in); diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c index 9e74beb0b64b..555b056b49b1 100644 --- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c @@ -2308,7 +2308,9 @@ static s32 wl_inform_single_bss(struct wl_priv *wl, struct wl_bss_info *bi) notif_bss_info->frame_len = offsetof(struct ieee80211_mgmt, u.beacon.variable) + wl_get_ielen(wl); - freq = ieee80211_channel_to_frequency(notif_bss_info->channel); + freq = ieee80211_channel_to_frequency(notif_bss_info->channel, + band->band); + channel = ieee80211_get_channel(wiphy, freq); WL_DBG("SSID : \"%s\", rssi %d, channel %d, capability : 0x04%x, bssid %pM\n", diff --git a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c b/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c index 66708d8df7f6..774b4e916b29 100644 --- a/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c +++ b/drivers/staging/brcm80211/brcmsmac/wl_mac80211.c @@ -74,9 +74,6 @@ static int wl_request_fw(struct wl_info *wl, struct pci_dev *pdev); static void wl_release_fw(struct wl_info *wl); /* local prototypes */ -static int wl_start(struct sk_buff *skb, struct wl_info *wl); -static int wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, - struct sk_buff *skb); static void wl_dpc(unsigned long data); static irqreturn_t wl_isr(int irq, void *dev_id); @@ -111,7 +108,6 @@ module_param(phymsglevel, int, 0); #define WL_TO_HW(wl) (wl->pub->ieee_hw) /* MAC80211 callback functions */ -static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb); static int wl_ops_start(struct ieee80211_hw *hw); static void wl_ops_stop(struct ieee80211_hw *hw); static int wl_ops_add_interface(struct ieee80211_hw *hw, @@ -152,21 +148,19 @@ static int wl_ops_ampdu_action(struct ieee80211_hw *hw, u8 buf_size); static void wl_ops_rfkill_poll(struct ieee80211_hw *hw); -static int wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) +static void wl_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { - int status; struct wl_info *wl = hw->priv; WL_LOCK(wl); if (!wl->pub->up) { WL_ERROR("ops->tx called while down\n"); - status = -ENETDOWN; + kfree_skb(skb); goto done; } - status = wl_start(skb, wl); + wlc_sendpkt_mac80211(wl->wlc, skb, hw); done: WL_UNLOCK(wl); - return status; } static int wl_ops_start(struct ieee80211_hw *hw) @@ -1396,25 +1390,6 @@ static void wl_free(struct wl_info *wl) } /* - * transmit a packet - * precondition: perimeter lock has been acquired - */ -static int 
BCMFASTPATH wl_start(struct sk_buff *skb, struct wl_info *wl) -{ - if (!wl) - return -ENETDOWN; - - return wl_start_int(wl, WL_TO_HW(wl), skb); -} - -static int BCMFASTPATH -wl_start_int(struct wl_info *wl, struct ieee80211_hw *hw, struct sk_buff *skb) -{ - wlc_sendpkt_mac80211(wl->wlc, skb, hw); - return NETDEV_TX_OK; -} - -/* * precondition: perimeter lock has been acquired */ void wl_txflowcontrol(struct wl_info *wl, struct wl_if *wlif, bool state, diff --git a/drivers/staging/brcm80211/brcmsmac/wlc_main.c b/drivers/staging/brcm80211/brcmsmac/wlc_main.c index 0870dc913cda..639b5d7c9603 100644 --- a/drivers/staging/brcm80211/brcmsmac/wlc_main.c +++ b/drivers/staging/brcm80211/brcmsmac/wlc_main.c @@ -6838,11 +6838,14 @@ prep_mac80211_status(struct wlc_info *wlc, d11rxhdr_t *rxh, struct sk_buff *p, ratespec_t rspec; unsigned char *plcp; +#if 0 + /* Clearly, this is bogus -- reading the TSF now is wrong */ wlc_read_tsf(wlc, &tsf_l, &tsf_h); /* mactime */ rx_status->mactime = tsf_h; rx_status->mactime <<= 32; rx_status->mactime |= tsf_l; - rx_status->flag |= RX_FLAG_TSFT; + rx_status->flag |= RX_FLAG_MACTIME_MPDU; /* clearly wrong */ +#endif channel = WLC_CHAN_CHANNEL(rxh->RxChan); diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c index a9a3e25a7efa..b6c42cb0d1c6 100644 --- a/drivers/staging/pohmelfs/config.c +++ b/drivers/staging/pohmelfs/config.c @@ -525,7 +525,7 @@ static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *n { int err; - if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) + if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) return; switch (msg->flags) { diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c index 2163d60c2eaf..3724e1e67ec2 100644 --- a/drivers/staging/winbond/wbusb.c +++ b/drivers/staging/winbond/wbusb.c @@ -118,13 +118,14 @@ static void wbsoft_configure_filter(struct ieee80211_hw *dev, *total_flags = new_flags; } -static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) +static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct wbsoft_priv *priv = dev->priv; if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) { priv->sMlmeFrame.wNumTxMMPDUDiscarded++; - return NETDEV_TX_BUSY; + kfree_skb(skb); + return; } priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME; @@ -140,8 +141,6 @@ static int wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) */ Mds_Tx(priv); - - return NETDEV_TX_OK; } static int wbsoft_start(struct ieee80211_hw *dev) diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index 52ec0959d462..5180a215d781 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns struct uvesafb_task *utask; struct uvesafb_ktask *task; - if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) + if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) return; if (msg->seq >= UVESAFB_TASKS_MAX) diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 0ad1699a1b3e..65f5068afd84 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -794,6 +794,21 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) return irq; } +static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, + unsigned int remote_port) +{ + struct evtchn_bind_interdomain bind_interdomain; + int err; + + bind_interdomain.remote_dom = remote_domain; + bind_interdomain.remote_port = remote_port; + + err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, + 
&bind_interdomain); + + return err ? : bind_evtchn_to_irq(bind_interdomain.local_port); +} + int bind_virq_to_irq(unsigned int virq, unsigned int cpu) { @@ -889,6 +904,29 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, } EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); +int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, + unsigned int remote_port, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, + void *dev_id) +{ + int irq, retval; + + irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port); + if (irq < 0) + return irq; + + retval = request_irq(irq, handler, irqflags, devname, dev_id); + if (retval != 0) { + unbind_from_irq(irq); + return retval; + } + + return irq; +} +EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); + int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) |
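The final hunks above export a new helper, bind_interdomain_evtchn_to_irqhandler(), which binds another domain's event channel to a local IRQ and installs a handler in one call (unwinding the binding if request_irq() fails). As a rough, hypothetical usage sketch only -- the domid, port, device name and handler below are placeholders, not taken from the patch -- a Xen backend driver built on top of this export might look roughly like:

/*
 * Hypothetical sketch, not part of the patch above: consuming the newly
 * exported bind_interdomain_evtchn_to_irqhandler(). Assumes the declaration
 * lands in <xen/events.h> alongside the existing bind_*_to_irqhandler()
 * helpers; frontend_domid/remote_port come from the backend's own setup.
 */
#include <linux/interrupt.h>
#include <xen/events.h>

static irqreturn_t demo_backend_interrupt(int irq, void *dev_id)
{
	/* Frontend kicked the event channel; process its requests here. */
	return IRQ_HANDLED;
}

static int demo_backend_connect(unsigned int frontend_domid,
				unsigned int remote_port, void *priv)
{
	int irq;

	/*
	 * Bind the remote domain's event channel to a local IRQ and hook up
	 * the handler in one step; returns the IRQ number or a negative errno.
	 */
	irq = bind_interdomain_evtchn_to_irqhandler(frontend_domid,
						    remote_port,
						    demo_backend_interrupt,
						    0, "demo-backend", priv);
	if (irq < 0)
		return irq;

	/* On teardown: unbind_from_irqhandler(irq, priv); */
	return irq;
}

Keeping the request_irq() inside the helper means callers never see a half-bound state: on failure the event channel is unbound again before the error is returned, which is the same pattern the existing bind_evtchn_to_irqhandler() shown earlier in the hunk follows.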