author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-08-06 12:15:09 +0200
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-08-08 07:52:01 +0200
commit    71ed79b0e4be0db254640c3beb9a1a0316eb5f61 (patch)
tree      aac2f4f0527802478b29c2e7897e56994b6ad759 /drivers/staging/wusbcore
parent    7d9c1d2f7aca2651b3821947bf928ee131df102a (diff)
download  linux-71ed79b0e4be0db254640c3beb9a1a0316eb5f61.tar.bz2
USB: Move wusbcore and UWB to staging as it is obsolete
The UWB and wusbcore code is long obsolete, so let us just move the code
out of the real part of the kernel and into the drivers/staging/ location,
with plans to remove it entirely in a few releases.

Link: https://lore.kernel.org/r/20190806101509.GA11280@kroah.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging/wusbcore')
-rw-r--r-- drivers/staging/wusbcore/Documentation/wusb-cbaf 130
-rw-r--r-- drivers/staging/wusbcore/Documentation/wusb-design-overview.rst 457
-rw-r--r-- drivers/staging/wusbcore/Kconfig 39
-rw-r--r-- drivers/staging/wusbcore/Makefile 28
-rw-r--r-- drivers/staging/wusbcore/TODO 8
-rw-r--r-- drivers/staging/wusbcore/cbaf.c 645
-rw-r--r-- drivers/staging/wusbcore/crypto.c 441
-rw-r--r-- drivers/staging/wusbcore/dev-sysfs.c 124
-rw-r--r-- drivers/staging/wusbcore/devconnect.c 1085
-rw-r--r-- drivers/staging/wusbcore/host/Kconfig 28
-rw-r--r-- drivers/staging/wusbcore/host/Makefile 3
-rw-r--r-- drivers/staging/wusbcore/host/hwa-hc.c 875
-rw-r--r-- drivers/staging/wusbcore/host/whci/Makefile 14
-rw-r--r-- drivers/staging/wusbcore/host/whci/asl.c 376
-rw-r--r-- drivers/staging/wusbcore/host/whci/debug.c 153
-rw-r--r-- drivers/staging/wusbcore/host/whci/hcd.c 356
-rw-r--r-- drivers/staging/wusbcore/host/whci/hw.c 93
-rw-r--r-- drivers/staging/wusbcore/host/whci/init.c 177
-rw-r--r-- drivers/staging/wusbcore/host/whci/int.c 82
-rw-r--r-- drivers/staging/wusbcore/host/whci/pzl.c 404
-rw-r--r-- drivers/staging/wusbcore/host/whci/qset.c 831
-rw-r--r-- drivers/staging/wusbcore/host/whci/whcd.h 202
-rw-r--r-- drivers/staging/wusbcore/host/whci/whci-hc.h 401
-rw-r--r-- drivers/staging/wusbcore/host/whci/wusb.c 210
-rw-r--r-- drivers/staging/wusbcore/include/association.h 151
-rw-r--r-- drivers/staging/wusbcore/include/wusb-wa.h 304
-rw-r--r-- drivers/staging/wusbcore/include/wusb.h 362
-rw-r--r-- drivers/staging/wusbcore/mmc.c 303
-rw-r--r-- drivers/staging/wusbcore/pal.c 45
-rw-r--r-- drivers/staging/wusbcore/reservation.c 110
-rw-r--r-- drivers/staging/wusbcore/rh.c 426
-rw-r--r-- drivers/staging/wusbcore/security.c 599
-rw-r--r-- drivers/staging/wusbcore/wa-hc.c 88
-rw-r--r-- drivers/staging/wusbcore/wa-hc.h 467
-rw-r--r-- drivers/staging/wusbcore/wa-nep.c 289
-rw-r--r-- drivers/staging/wusbcore/wa-rpipe.c 539
-rw-r--r-- drivers/staging/wusbcore/wa-xfer.c 2927
-rw-r--r-- drivers/staging/wusbcore/wusbhc.c 490
-rw-r--r-- drivers/staging/wusbcore/wusbhc.h 487
39 files changed, 14749 insertions, 0 deletions
diff --git a/drivers/staging/wusbcore/Documentation/wusb-cbaf b/drivers/staging/wusbcore/Documentation/wusb-cbaf
new file mode 100644
index 000000000000..8b3d43efce90
--- /dev/null
+++ b/drivers/staging/wusbcore/Documentation/wusb-cbaf
@@ -0,0 +1,130 @@
+#! /bin/bash
+#
+
+set -e
+
+progname=$(basename $0)
+function help
+{
+ cat <<EOF
+Usage: $progname COMMAND DEVICEs [ARGS]
+
+Command for manipulating the pairing/authentication credentials of a
+Wireless USB device that supports wired-mode Cable-Based-Association.
+
+Works in conjunction with the wusb-cba.ko driver from http://linuxuwb.org.
+
+
+DEVICE
+
+ sysfs path to the device to authenticate; for example, both of these
+ refer to the same device:
+
+ /sys/devices/pci0000:00/0000:00:1d.7/usb1/1-4/1-4.4/1-4.4:1.1
+ /sys/bus/usb/drivers/wusb-cbaf/1-4.4:1.1
+
+COMMAND/ARGS are
+
+ start
+
+ Start a WUSB host controller (by setting up a CHID)
+
+ set-chid DEVICE HOST-CHID HOST-BANDGROUP HOST-NAME
+
+ Sets host information in the device; after this you can call
+ get-cdid to see how the device reports itself to us.
+
+ get-cdid DEVICE
+
+ Get the device ID associated with the HOST-CHID we sent with
+ 'set-chid'. We might not know about it.
+
+ set-cc DEVICE
+
+ If we allow the device to connect, set a random new CDID and CK
+ (connection key). The device saves them for the next time it wants
+ to connect wirelessly. We save them for that next time too, so we
+ can authenticate the device (by the CDID it uses to identify
+ itself) and use the CK to talk to it over an encrypted link.
+
+CHID is always 16 hex bytes in 'XX YY ZZ...' form
+BANDGROUP is almost always 0001
+
+Examples:
+
+ You can default most arguments to '' to get a sane value:
+
+ $ $progname set-chid '' '' '' "My host name"
+
+ A full sequence:
+
+ $ $progname set-chid '' '' '' "My host name"
+ $ $progname get-cdid ''
+ $ $progname set-cc ''
+
+EOF
+}
+
+
+# Defaults
+# FIXME: CHID should come from a database :), band group from the host
+host_CHID="00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff"
+host_band_group="0001"
+host_name=$(hostname)
+
+devs="$(echo /sys/bus/usb/drivers/wusb-cbaf/[0-9]*)"
+hdevs="$(for h in /sys/class/uwb_rc/*/wusbhc; do readlink -f $h; done)"
+
+result=0
+case $1 in
+ start)
+ for dev in ${2:-$hdevs}
+ do
+ echo $host_CHID > $dev/wusb_chid
+ echo I: started host $(basename $dev) >&2
+ done
+ ;;
+ stop)
+ for dev in ${2:-$hdevs}
+ do
+ echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid
+ echo I: stopped host $(basename $dev) >&2
+ done
+ ;;
+ set-chid)
+ shift
+ for dev in ${2:-$devs}; do
+ echo "${4:-$host_name}" > $dev/wusb_host_name
+ echo "${3:-$host_band_group}" > $dev/wusb_host_band_groups
+ echo ${2:-$host_CHID} > $dev/wusb_chid
+ done
+ ;;
+ get-cdid)
+ for dev in ${2:-$devs}
+ do
+ cat $dev/wusb_cdid
+ done
+ ;;
+ set-cc)
+ for dev in ${2:-$devs}; do
+ shift
+ CDID="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
+ CK="$(head --bytes=16 /dev/urandom | od -tx1 -An)"
+ echo "$CDID" > $dev/wusb_cdid
+ echo "$CK" > $dev/wusb_ck
+
+ echo I: CC set >&2
+ echo "CHID: $(cat $dev/wusb_chid)"
+ echo "CDID:$CDID"
+ echo "CK: $CK"
+ done
+ ;;
+ help|h|--help|-h)
+ help
+ ;;
+ *)
+ echo "E: Unknown usage" 1>&2
+ help 1>&2
+ result=1
+esac
+exit $result
diff --git a/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst b/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst
new file mode 100644
index 000000000000..dc5e21609bb5
--- /dev/null
+++ b/drivers/staging/wusbcore/Documentation/wusb-design-overview.rst
@@ -0,0 +1,457 @@
+================================
+Linux UWB + Wireless USB + WiNET
+================================
+
+ Copyright (C) 2005-2006 Intel Corporation
+
+ Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License version
+ 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ 02110-1301, USA.
+
+
+Please visit http://bughost.org/thewiki/Design-overview.txt-1.8 for
+updated content.
+
+ * Design-overview.txt-1.8
+
+This code implements an Ultra Wide Band stack for Linux, as well as
+drivers for the USB based UWB radio controllers defined in the
+Wireless USB 1.0 specification (including Wireless USB host controller
+and an Intel WiNET controller).
+
+.. Contents
+ 1. Introduction
+ 1. HWA: Host Wire adapters, your Wireless USB dongle
+
+ 2. DWA: Device Wired Adaptor, a Wireless USB hub for wired
+ devices
+ 3. WHCI: Wireless Host Controller Interface, the PCI WUSB host
+ adapter
+ 2. The UWB stack
+ 1. Devices and hosts: the basic structure
+
+ 2. Host Controller life cycle
+
+ 3. On the air: beacons and enumerating the radio neighborhood
+
+ 4. Device lists
+ 5. Bandwidth allocation
+
+ 3. Wireless USB Host Controller drivers
+
+ 4. Glossary
+
+
+Introduction
+============
+
+UWB is a wide-band communication protocol that is to serve also as the
+low-level protocol for others (much like TCP sits on IP). Currently
+these others are Wireless USB and TCP/IP, but it seems Bluetooth and
+Firewire/1394 are coming along.
+
+UWB uses a band from roughly 3 to 10 GHz, transmitting at a max of
+~-41dB (or 0.074 uW/MHz--geography specific data is still being
+negotiated w/ regulators, so watch for changes). That band is divided into
+a bunch of ~1.5 GHz wide channels (or band groups) composed of three
+subbands/subchannels (528 MHz each). Each channel is independent of the
+others, so you could consider them different "busses". Initially this
+driver considers them all a single one.
+
+Radio time is divided into 65536 us long /superframes/, each one divided
+into 256 256us long /MASs/ (Media Allocation Slots), which are the basic
+time/media allocation units for transferring data. At the beginning of
+each superframe there is a Beacon Period (BP), where every device
+transmits its beacon on a single MAS. The length of the BP depends on how
+many devices are present and the length of their beacons.
+
+Devices have a MAC address (fixed, 48 bit) and a device address
+(changeable, 16 bit) and send periodic beacons to advertise themselves and pass
+info on what they are and do. They advertise their capabilities and a
+bunch of other stuff.
+
+The different logical parts of this driver are:
+
+ *
+
+ *UWB*: the Ultra-Wide-Band stack -- manages the radio and
+ associated spectrum to allow devices to share it. It allows
+ control of bandwidth assignment, beaconing, scanning, etc.
+
+ *
+
+ *WUSB*: the layer that sits on top of UWB to provide Wireless USB.
+ The Wireless USB spec defines means to control a UWB radio and to
+ do the actual WUSB.
+
+
+HWA: Host Wire adapters, your Wireless USB dongle
+-------------------------------------------------
+
+WUSB also defines a device called a Host Wire Adaptor (HWA), which in
+plain terms is a USB dongle that enables your PC to have UWB and Wireless
+USB. The Wireless USB Host Controller in a HWA looks to the host like a
+[Wireless] USB controller connected via USB (!)
+
+The HWA itself is broken into two or three main interfaces:
+
+ *
+
+ *RC*: Radio control -- this implements an interface to the
+ Ultra-Wide-Band radio controller. The driver for this implements a
+ USB-based UWB Radio Controller to the UWB stack.
+
+ *
+
+ *HC*: the wireless USB host controller. It looks like a USB host
+ whose root port is the radio and the WUSB devices connect to it.
+ To the system it looks like a separate USB host. The driver (will)
+ implement a USB host controller (similar to UHCI, OHCI or EHCI)
+ for which the root hub is the radio...To reiterate: it is a USB
+ controller that is connected via USB instead of PCI.
+
+ *
+
+ *WINET*: some HW provides a WiNET interface (IP over UWB). This
+ package provides a driver for it (it looks like a network
+ interface, winetX). The driver detects when a link of its type
+ comes up and kicks into gear.
+
+
+DWA: Device Wired Adaptor, a Wireless USB hub for wired devices
+---------------------------------------------------------------
+
+These are the complement to HWAs. They are a USB host for connecting
+wired devices, but one that is connected to your PC via Wireless
+USB. To the system it looks like yet another USB host. To the untrained
+eye, it looks like a hub that connects upstream wirelessly.
+
+We still offer no support for this; however, it should share a lot of
+code with the HWA-RC driver; a bunch of factorization work has been
+done to support that in upcoming releases.
+
+
+WHCI: Wireless Host Controller Interface, the PCI WUSB host adapter
+-------------------------------------------------------------------
+
+This is your usual PCI device that implements WHCI. Similar in concept
+to EHCI, it allows your wireless USB devices (including DWAs) to connect
+to your host via a PCI interface. As in the case of the HWA, it has a
+Radio Control interface and the WUSB Host Controller interface per se.
+
+There is still no driver support for this, but there will be in
+upcoming releases.
+
+
+The UWB stack
+=============
+
+The main mission of the UWB stack is to keep a tally of which devices
+are in radio proximity to allow drivers to connect to them. It also
+provides an API for controlling the local radio controllers (RCs from
+now on): starting/stopping beaconing, scanning, allocating bandwidth, etc.
+
+
+Devices and hosts: the basic structure
+--------------------------------------
+
+The main building block here is the UWB device (struct uwb_dev). For
+each device that pops up in radio presence (ie: the UWB host receives a
+beacon from it) you get a struct uwb_dev that will show up in
+/sys/bus/uwb/devices.
+
+For each RC that is detected, a new struct uwb_rc and struct uwb_dev are
+created. An entry is also created in /sys/class/uwb_rc for each RC.
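+
+For example, a quick sketch of poking at what the stack has found
+(assuming at least one radio controller, enumerated here as uwb0):
+
+    $ ls /sys/bus/uwb/devices
+    $ ls /sys/class/uwb_rc/uwb0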
+
+Each RC driver is implemented by a separate driver that plugs into the
+interface that the UWB stack provides through a struct uwb_rc_ops. The
+spec creators have been nice enough to make the message format the same
+for HWA and WHCI RCs, so the driver is really a very thin transport that
+moves the requests from the UWB API to the device [/uwb_rc_ops->cmd()/]
+and sends the replies and notifications back to the API
+[/uwb_rc_neh_grok()/]. Notifications are handed to the UWB daemon, which
+is chartered, among other things, to keep tabs on how the UWB radio
+neighborhood looks, creating and destroying devices as they show up or
+disappear.
+
+Command execution is very simple: a command block is sent and an event
+block or reply is expected back. For sending/receiving command/events, a
+handle called /neh/ (Notification/Event Handle) is opened with
+/uwb_rc_neh_open()/.
+
+The HWA-RC (USB dongle) driver (drivers/uwb/hwa-rc.c) does this job for
+the USB connected HWA. Eventually, drivers/whci-rc.c will do the same
+for the PCI connected WHCI controller.
+
+
+Host Controller life cycle
+--------------------------
+
+So let's say we connect a dongle to the system: it is detected and
+firmware uploaded if needed [for Intel's i1480
+/drivers/uwb/ptc/usb.c:ptc_usb_probe()/] and then it is reenumerated.
+Now we have a real HWA device connected and
+/drivers/uwb/hwa-rc.c:hwarc_probe()/ picks it up, which will set up the
+Wire-Adaptor environment and then suck it into the UWB stack's vision of
+the world [/drivers/uwb/lc-rc.c:uwb_rc_add()/].
+
+ *
+
+ [*] The stack should put a new RC to scan for devices
+ [/uwb_rc_scan()/] so it finds what's available around and tries to
+ connect to them, but this is policy stuff and should be driven
+ from user space. As of now, the operator is expected to do it
+ manually; see the release notes for documentation on the procedure.
+
+When a dongle is disconnected, /drivers/uwb/hwa-rc.c:hwarc_disconnect()/
+takes care of tearing everything down safely (or not...).
+
+
+On the air: beacons and enumerating the radio neighborhood
+----------------------------------------------------------
+
+So assuming we have devices and we have agreed on a channel to connect
+on (let's say 9), we put the new RC to beacon:
+
+ *
+
+ $ echo 9 0 > /sys/class/uwb_rc/uwb0/beacon
+
+Now it is visible. If there were other devices in the same radio channel
+and beacon group (that's what the zero is for), the dongle's radio
+control interface will send beacon notifications on its
+notification/event endpoint (NEEP). The beacon notifications are part of
+the event stream that is funneled into the API with
+/drivers/uwb/neh.c:uwb_rc_neh_grok()/ and delivered to the UWBD, the UWB
+daemon through a notification list.
+
+UWBD wakes up and scans the event list; finds a beacon and adds it to
+the BEACON CACHE (/uwb_beca/). If it receives a number of beacons from
+the same device, it considers it to be 'onair' and creates a new device
+[/drivers/uwb/lc-dev.c:uwbd_dev_onair()/]. Similarly, when no beacons
+are received in some time, the device is considered gone and wiped out
+[uwbd calls periodically /uwb/beacon.c:uwb_beca_purge()/ that will purge
+the beacon cache of dead devices].
+
+
+Device lists
+------------
+
+All UWB devices are kept in the list of the struct bus_type uwb_bus_type.
+
+
+Bandwidth allocation
+--------------------
+
+The UWB stack maintains a local copy of DRP availability through
+processing of incoming *DRP Availability Change* notifications. This
+local copy is currently used to present the current bandwidth
+availability to the user through the sysfs file
+/sys/class/uwb_rc/uwbx/bw_avail. In the future the bandwidth
+availability information will be used by the bandwidth reservation
+routines.
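+
+For example, a minimal sketch of checking the current availability from
+user space (assuming the first RC shows up as uwb0):
+
+    $ cat /sys/class/uwb_rc/uwb0/bw_avail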
+
+The bandwidth reservation routines are in progress and are thus not
+present in the current release. When completed they will enable a user
+to initiate DRP reservation requests through interaction with sysfs. DRP
+reservation requests from remote UWB devices will also be handled. The
+bandwidth management done by the UWB stack will include callbacks to the
+higher layers that will enable them to use the reservations upon
+completion. [Note: The bandwidth reservation work is in progress and
+subject to change.]
+
+
+Wireless USB Host Controller drivers
+====================================
+
+*WARNING* This section needs a lot of work!
+
+As explained above, there are three different types of HCs in the WUSB
+world: HWA-HC, DWA-HC and WHCI-HC.
+
+HWA-HC and DWA-HC share that they are Wire-Adapters (USB or WUSB
+connected controllers), and their transfer management system is almost
+identical. So is their notification delivery system.
+
+HWA-HC and WHCI-HC share that they are both WUSB host controllers, so
+they have to deal with the WUSB device life cycle, maintenance and the
+wireless root hub.
+
+HWA exposes a Host Controller interface (HWA-HC 0xe0/02/02). This has
+three endpoints (Notifications, Data Transfer In and Data Transfer
+Out--known as NEP, DTI and DTO in the code).
+
+We reserve UWB bandwidth for our Wireless USB Cluster, create a Cluster
+ID and tell the HC to use all that. Then we start it. This means the HC
+starts sending MMCs.
+
+ *
+
+ The MMCs are blocks of data defined somewhere in the WUSB1.0 spec
+ that define a stream in the UWB channel time allocated for sending
+ WUSB IEs (host to device commands/notifications) and Device
+ Notifications (device initiated to host). Each host defines a
+ unique Wireless USB cluster through MMCs. Devices can connect to a
+ single cluster at a time. The IEs are Information Elements, and
+ among them are the bandwidth allocations that tell each device
+ when it can transmit or receive.
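+
+In practice, starting the host amounts to writing the host's CHID to the
+WUSB host controller's sysfs node; a minimal sketch, borrowing from the
+Documentation/wusb-cbaf helper script (assuming the host shows up under
+/sys/class/uwb_rc/uwb0/wusbhc and using the script's example CHID):
+
+    $ echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" > /sys/class/uwb_rc/uwb0/wusbhc/wusb_chid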
+
+Now it all depends on external stimuli.
+
+New device connection
+---------------------
+
+When a new device pops up, it scans the radio looking for MMCs that give
+away the existence of Wireless USB channels. Once one (or more) are found,
+it selects which one to connect to and sends a /DN_Connect/ (device
+notification connect) during the DNTS (Device Notification Time
+Slot--announced in the MMCs).
+
+HC picks the /DN_Connect/ out (nep module sends to notif.c for delivery
+into /devconnect/). This process starts the authentication process for
+the device. First we allocate a /fake port/ and assign an
+unauthenticated address (128 to 255--what we really do is
+0x80 | fake_port_idx). We fiddle with the fake port status and /hub_wq/
+sees a new connection, so it moves on to enable the fake port with a reset.
+
+So now we are in the reset path -- we know we have a not-yet-enumerated
+device with an unauthorized address; we ask user space to authenticate
+(FIXME: not yet done, similar to bluetooth pairing), then we do the key
+exchange (FIXME: not yet done) and issue a /set address 0/ to bring the
+device to the default state. Device is authenticated.
+
+From here, the USB stack takes control through the usb_hcd ops. hub_wq
+has seen the port status changes, as we have been toggling them. It will
+start enumerating and doing transfers through usb_hcd->urb_enqueue() to
+read descriptors and move our data.
+
+Device life cycle and keep alives
+---------------------------------
+
+Every time there is a successful transfer to/from a device, we update a
+per-device activity timestamp. If not, every now and then we check and
+if the activity timestamp gets old, we ping the device by sending it a
+Keep Alive IE; it responds with a /DN_Alive/ pong during the DNTS (this
+arrives to us as a notification through
+devconnect.c:wusb_handle_dn_alive()). If a device times out, we
+disconnect it from the system (cleaning up internal information and
+toggling the bits in the fake hub port, which kicks hub_wq into removing
+the rest of the stuff).
+
+This is done through devconnect:__wusb_check_devs(), which will scan the
+device list looking for those that need refreshing.
+
+If the device wants to disconnect, it will either die (ugly) or send a
+/DN_Disconnect/ that will prompt a disconnection from the system.
+
+Sending and receiving data
+--------------------------
+
+Data is sent and received through /Remote Pipes/ (rpipes). An rpipe is
+/aimed/ at an endpoint in a WUSB device. This is the same for HWAs and
+DWAs.
+
+Each HC has a number of rpipes and buffers that can be assigned to them;
+when doing a data transfer (xfer), first the rpipe has to be aimed and
+prepared (buffers assigned), then we can start queueing requests for
+data in or out.
+
+Data buffers have to be segmented out before sending--so we send first a
+header (segment request) and then if there is any data, a data buffer
+immediately after to the DTI interface (yep, even the request). If our
+buffer is bigger than the max segment size, then we just do multiple
+requests.
+
+[This sucks, because doing USB scatter-gather in Linux is resource
+intensive, if any...not that the current approach is not. It just has to
+be cleaned up a lot :)].
+
+If reading, we don't send data buffers, just the segment headers saying
+we want to read segments.
+
+When the xfer is executed, we receive a notification that says data is
+ready in the DTI endpoint (handled through
+xfer.c:wa_handle_notif_xfer()). In there we read from the DTI endpoint a
+descriptor that gives us the status of the transfer, its identification
+(given when we issued it) and the segment number. If it was a data read,
+we issue another URB to read into the destination buffer the chunk of
+data coming out of the remote endpoint. Done, wait for the next guy. The
+callbacks for the URBs issued from here are the ones that will declare
+the xfer complete at some point and call its callback.
+
+Seems simple, but the implementation is not trivial.
+
+ *
+
+ *WARNING* Old!!
+
+The main xfer descriptor, wa_xfer (equivalent to a URB) contains an
+array of segments, tallies on segments and buffers, and callback
+information. Buried in there are a lot of URBs for executing the segments
+and buffer transfers.
+
+For OUT xfers, there is an array of segments, one URB for each, and
+another array of buffer URBs. When submitting, we submit URBs for segment request
+1, buffer 1, segment 2, buffer 2...etc. Then we wait on the DTI for xfer
+result data; when all the segments are complete, we call the callback to
+finalize the transfer.
+
+For IN xfers, we only issue URBs for the segments we want to read and
+then wait for the xfer result data.
+
+URB mapping into xfers
+^^^^^^^^^^^^^^^^^^^^^^
+
+This is done by hwahc_op_urb_[en|de]queue(). In enqueue() we aim an
+rpipe to the endpoint where we have to transmit, create a transfer
+context (wa_xfer) and submit it. When the xfer is done, our callback is
+called and we assign the status bits and release the xfer resources.
+
+In dequeue() we are basically cancelling/aborting the transfer. We issue
+an xfer abort request to the HC, cancel all the URBs we had submitted
+and not yet done and when all that is done, the xfer callback will be
+called--this will call the URB callback.
+
+
+Glossary
+========
+
+*DWA* -- Device Wire Adapter
+
+USB host, wired for downstream devices, upstream connects wirelessly
+with Wireless USB.
+
+*EVENT* -- Response to a command on the NEEP
+
+*HWA* -- Host Wire Adapter / USB dongle for UWB and Wireless USB
+
+*NEH* -- Notification/Event Handle
+
+Handle/file descriptor for receiving notifications or events. The WA
+code requires you to get one of these to listen for notifications or
+events on the NEEP.
+
+*NEEP* -- Notification/Event EndPoint
+
+Stuff related to the management of the first endpoint of a HWA USB
+dongle that is used to deliver a stream of events and notifications to
+the host.
+
+*NOTIFICATION* -- Message coming in on the NEEP in response to something.
+
+*RC* -- Radio Control
+
+Design-overview.txt-1.8 (last edited 2006-11-04 12:22:24 by
+InakyPerezGonzalez)
diff --git a/drivers/staging/wusbcore/Kconfig b/drivers/staging/wusbcore/Kconfig
new file mode 100644
index 000000000000..056c60b4d57f
--- /dev/null
+++ b/drivers/staging/wusbcore/Kconfig
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Wireless USB Core configuration
+#
+config USB_WUSB
+ tristate "Enable Wireless USB extensions"
+ depends on UWB
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_CCM
+ help
+ Enable the host-side support for Wireless USB.
+
+ To compile this support select Y (built in). It is safe to
+ select even if you don't have the hardware.
+
+config USB_WUSB_CBAF
+ tristate "Support WUSB Cable Based Association (CBA)"
+ depends on USB
+ help
+ Some WUSB devices support Cable Based Association. It's used to
+ enable the secure communication between the host and the
+ device.
+
+ Enable this option if your WUSB device must be connected
+ via wired USB before establishing a wireless link.
+
+ It is safe to select even if you don't have compatible
+ hardware.
+
+config USB_WUSB_CBAF_DEBUG
+ bool "Enable CBA debug messages"
+ depends on USB_WUSB_CBAF
+ help
+ Say Y here if you want the CBA to produce a bunch of debug messages
+ to the system log. Select this if you are having a problem with
+ CBA support and want to see more of what is going on.
+
+source "drivers/staging/wusbcore/host/Kconfig"
diff --git a/drivers/staging/wusbcore/Makefile b/drivers/staging/wusbcore/Makefile
new file mode 100644
index 000000000000..b47b874268ac
--- /dev/null
+++ b/drivers/staging/wusbcore/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-$(CONFIG_USB_WUSB_CBAF_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_USB_WUSB) += wusbcore.o
+obj-$(CONFIG_USB_HWA_HCD) += wusb-wa.o
+obj-$(CONFIG_USB_WUSB_CBAF) += wusb-cbaf.o
+
+
+wusbcore-y := \
+ crypto.o \
+ devconnect.o \
+ dev-sysfs.o \
+ mmc.o \
+ pal.o \
+ rh.o \
+ reservation.o \
+ security.o \
+ wusbhc.o
+
+wusb-cbaf-y := cbaf.o
+
+wusb-wa-y := \
+ wa-hc.o \
+ wa-nep.o \
+ wa-rpipe.o \
+ wa-xfer.o
+
+obj-y += host/
diff --git a/drivers/staging/wusbcore/TODO b/drivers/staging/wusbcore/TODO
new file mode 100644
index 000000000000..abae57000534
--- /dev/null
+++ b/drivers/staging/wusbcore/TODO
@@ -0,0 +1,8 @@
+TODO: Remove in late 2019 unless there are users
+
+There seems to not be any real wireless USB devices anywhere in the wild
+anymore. It turned out to be a failed technology :(
+
+This will be removed from the tree if no one objects.
+
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/drivers/staging/wusbcore/cbaf.c b/drivers/staging/wusbcore/cbaf.c
new file mode 100644
index 000000000000..57062eaf7558
--- /dev/null
+++ b/drivers/staging/wusbcore/cbaf.c
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB - Cable Based Association
+ *
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * WUSB devices have to be paired (associated in WUSB lingo) so
+ * that they can connect to the system.
+ *
+ * One way of pairing is using CBA--Cable Based Association. The first
+ * time you plug in the device with a cable, association is done between
+ * host and device; on subsequent occasions you can connect wirelessly
+ * without having to associate again. That's the idea.
+ *
+ * This driver does nothing Earth shattering. It just provides an
+ * interface to chat with the wire-connected device so we can get a
+ * CDID (device ID) that might have been previously associated to a
+ * CHID (host ID) and to set up a new <CHID,CDID,CK> triplet
+ * (connection context), with the CK being the secret, or connection
+ * key. This is the pairing data.
+ *
+ * When a device with the CBA capability connects, the probe routine
+ * just creates a bunch of sysfs files that a user space enumeration
+ * manager uses to allow it to connect wirelessly to the system or not.
+ *
+ * The process goes like this:
+ *
+ * 1. Device plugs, cbaf is loaded, notifications happen.
+ *
+ * 2. The connection manager (CM) sees a device with CBAF capability
+ * (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
+ *
+ * 3. The CM writes the host name, supported band groups, and the CHID
+ * (host ID) into the wusb_host_name, wusb_host_band_groups and
+ * wusb_chid files. These get sent to the device and the CDID (if
+ * any) for this host is requested.
+ *
+ * 4. The CM can verify that the device's supported band groups
+ * (wusb_device_band_groups) are compatible with the host.
+ *
+ * 5. The CM reads the wusb_cdid file.
+ *
+ * 6. The CM looks up its database
+ *
+ * 6.1 If it has a matching CHID,CDID entry, the device has been
+ * authorized before (paired) and nothing further needs to be
+ * done.
+ *
+ * 6.2 If the CDID is zero (or the CM doesn't find a matching CDID in
+ * its database), the device is assumed to be not known. The CM
+ * may associate the host with the device by writing a randomly
+ * generated CDID to wusb_cdid and then a random CK to wusb_ck
+ * (this uploads the new CC to the device).
+ *
+ * The CM may choose to prompt the user before associating with a new
+ * device.
+ *
+ * 7. Device is unplugged.
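+ *
+ * For illustration, a rough sketch of the sysfs traffic behind steps
+ * 3-6 (the Documentation/wusb-cbaf script drives the same sequence;
+ * the CHID below and the $RANDOM_* placeholders are examples only):
+ *
+ *   cd /sys/devices/blah/OURDEVICE
+ *   echo "My host name" > wusb_host_name
+ *   echo "0001" > wusb_host_band_groups
+ *   echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" > wusb_chid
+ *   cat wusb_cdid
+ *   echo "$RANDOM_CDID" > wusb_cdid     # 16 random hex bytes
+ *   echo "$RANDOM_CK" > wusb_ck         # 16 random hex bytes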
+ *
+ * When the device tries to connect wirelessly, it will present its
+ * CDID to the WUSB host controller. The CM will query the
+ * database. If the CHID/CDID pair is found, it will (with a 4-way
+ * handshake) challenge the device to demonstrate it has the CK secret
+ * key (from our database) without actually exchanging it. Once
+ * satisfied, crypto keys are derived from the CK, the device is
+ * connected and all communication is encrypted.
+ *
+ * References:
+ * [WUSB-AM] Association Models Supplement to the Certified Wireless
+ * Universal Serial Bus Specification, version 1.0.
+ */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/usb.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "../uwb/uwb.h"
+#include "include/wusb.h"
+#include "include/association.h"
+
+#define CBA_NAME_LEN 0x40 /* [WUSB-AM] table 4-7 */
+
+/* An instance of a Cable-Based-Association-Framework device */
+struct cbaf {
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+ void *buffer;
+ size_t buffer_size;
+
+ struct wusb_ckhdid chid;
+ char host_name[CBA_NAME_LEN];
+ u16 host_band_groups;
+
+ struct wusb_ckhdid cdid;
+ char device_name[CBA_NAME_LEN];
+ u16 device_band_groups;
+
+ struct wusb_ckhdid ck;
+};
+
+/*
+ * Verify that a CBAF USB-interface has what we need
+ *
+ * According to [WUSB-AM], CBA devices should provide at least two
+ * association requests:
+ * - RETRIEVE_HOST_INFO
+ * - ASSOCIATE
+ *
+ * If the device doesn't provide these requests, we do not know how
+ * to deal with it.
+ */
+static int cbaf_check(struct cbaf *cbaf)
+{
+ int result;
+ struct device *dev = &cbaf->usb_iface->dev;
+ struct wusb_cbaf_assoc_info *assoc_info;
+ struct wusb_cbaf_assoc_request *assoc_request;
+ size_t assoc_size;
+ void *itr, *top;
+ int ar_rhi = 0, ar_assoc = 0;
+
+ result = usb_control_msg(
+ cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
+ CBAF_REQ_GET_ASSOCIATION_INFORMATION,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ cbaf->buffer, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "Cannot get available association types: %d\n",
+ result);
+ return result;
+ }
+
+ assoc_info = cbaf->buffer;
+ if (result < sizeof(*assoc_info)) {
+ dev_err(dev, "Not enough data to decode association info "
+ "header (%zu vs %zu bytes required)\n",
+ (size_t)result, sizeof(*assoc_info));
+ return result;
+ }
+
+ assoc_size = le16_to_cpu(assoc_info->Length);
+ if (result < assoc_size) {
+ dev_err(dev, "Not enough data to decode association info "
+ "(%zu vs %zu bytes required)\n",
+ (size_t)assoc_size, sizeof(*assoc_info));
+ return result;
+ }
+ /*
+ * From now on, we just verify, but won't error out unless we
+ * don't find the AR_TYPE_WUSB_{RETRIEVE_HOST_INFO,ASSOCIATE}
+ * types.
+ */
+ itr = cbaf->buffer + sizeof(*assoc_info);
+ top = cbaf->buffer + assoc_size;
+ dev_dbg(dev, "Found %u association requests (%zu bytes)\n",
+ assoc_info->NumAssociationRequests, assoc_size);
+
+ while (itr < top) {
+ u16 ar_type, ar_subtype;
+ u32 ar_size;
+ const char *ar_name;
+
+ assoc_request = itr;
+
+ if (top - itr < sizeof(*assoc_request)) {
+ dev_err(dev, "Not enough data to decode association "
+ "request (%zu vs %zu bytes needed)\n",
+ top - itr, sizeof(*assoc_request));
+ break;
+ }
+
+ ar_type = le16_to_cpu(assoc_request->AssociationTypeId);
+ ar_subtype = le16_to_cpu(assoc_request->AssociationSubTypeId);
+ ar_size = le32_to_cpu(assoc_request->AssociationTypeInfoSize);
+ ar_name = "unknown";
+
+ switch (ar_type) {
+ case AR_TYPE_WUSB:
+ /* Verify we have what is mandated by [WUSB-AM]. */
+ switch (ar_subtype) {
+ case AR_TYPE_WUSB_RETRIEVE_HOST_INFO:
+ ar_name = "RETRIEVE_HOST_INFO";
+ ar_rhi = 1;
+ break;
+ case AR_TYPE_WUSB_ASSOCIATE:
+ /* send assoc data */
+ ar_name = "ASSOCIATE";
+ ar_assoc = 1;
+ break;
+ }
+ break;
+ }
+
+ dev_dbg(dev, "Association request #%02u: 0x%04x/%04x "
+ "(%zu bytes): %s\n",
+ assoc_request->AssociationDataIndex, ar_type,
+ ar_subtype, (size_t)ar_size, ar_name);
+
+ itr += sizeof(*assoc_request);
+ }
+
+ if (!ar_rhi) {
+ dev_err(dev, "Missing RETRIEVE_HOST_INFO association "
+ "request\n");
+ return -EINVAL;
+ }
+ if (!ar_assoc) {
+ dev_err(dev, "Missing ASSOCIATE association request\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct wusb_cbaf_host_info cbaf_host_info_defaults = {
+ .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+ .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_RETRIEVE_HOST_INFO),
+ .CHID_hdr = WUSB_AR_CHID,
+ .LangID_hdr = WUSB_AR_LangID,
+ .HostFriendlyName_hdr = WUSB_AR_HostFriendlyName,
+};
+
+/* Send WUSB host information (CHID and name) to a CBAF device */
+static int cbaf_send_host_info(struct cbaf *cbaf)
+{
+ struct wusb_cbaf_host_info *hi;
+ size_t name_len;
+ size_t hi_size;
+
+ hi = cbaf->buffer;
+ memset(hi, 0, sizeof(*hi));
+ *hi = cbaf_host_info_defaults;
+ hi->CHID = cbaf->chid;
+ hi->LangID = 0; /* FIXME: I guess... */
+ strlcpy(hi->HostFriendlyName, cbaf->host_name, CBA_NAME_LEN);
+ name_len = strlen(cbaf->host_name);
+ hi->HostFriendlyName_hdr.len = cpu_to_le16(name_len);
+ hi_size = sizeof(*hi) + name_len;
+
+ return usb_control_msg(cbaf->usb_dev,
+ usb_sndctrlpipe(cbaf->usb_dev, 0),
+ CBAF_REQ_SET_ASSOCIATION_RESPONSE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0x0101,
+ cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ hi, hi_size, USB_CTRL_SET_TIMEOUT);
+}
+
+/*
+ * Get the device's information (CDID) associated with our CHID
+ *
+ * The device will return its information (CDID, name, bandgroups)
+ * associated with the CHID we have set before, or a 0 CDID and default
+ * name and bandgroup if no CHID is set or it is unknown.
+ */
+static int cbaf_cdid_get(struct cbaf *cbaf)
+{
+ int result;
+ struct device *dev = &cbaf->usb_iface->dev;
+ struct wusb_cbaf_device_info *di;
+ size_t needed;
+
+ di = cbaf->buffer;
+ result = usb_control_msg(
+ cbaf->usb_dev, usb_rcvctrlpipe(cbaf->usb_dev, 0),
+ CBAF_REQ_GET_ASSOCIATION_REQUEST,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0x0200, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ di, cbaf->buffer_size, USB_CTRL_GET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "Cannot request device information: %d\n",
+ result);
+ return result;
+ }
+
+ needed = result < sizeof(*di) ? sizeof(*di) : le32_to_cpu(di->Length);
+ if (result < needed) {
+ dev_err(dev, "Not enough data in DEVICE_INFO reply (%zu vs "
+ "%zu bytes needed)\n", (size_t)result, needed);
+ return -ENOENT;
+ }
+
+ strlcpy(cbaf->device_name, di->DeviceFriendlyName, CBA_NAME_LEN);
+ cbaf->cdid = di->CDID;
+ cbaf->device_band_groups = le16_to_cpu(di->BandGroups);
+
+ return 0;
+}
+
+static ssize_t cbaf_wusb_chid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return sprintf(buf, "%16ph\n", cbaf->chid.data);
+}
+
+static ssize_t cbaf_wusb_chid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx",
+ &cbaf->chid.data[0] , &cbaf->chid.data[1],
+ &cbaf->chid.data[2] , &cbaf->chid.data[3],
+ &cbaf->chid.data[4] , &cbaf->chid.data[5],
+ &cbaf->chid.data[6] , &cbaf->chid.data[7],
+ &cbaf->chid.data[8] , &cbaf->chid.data[9],
+ &cbaf->chid.data[10], &cbaf->chid.data[11],
+ &cbaf->chid.data[12], &cbaf->chid.data[13],
+ &cbaf->chid.data[14], &cbaf->chid.data[15]);
+
+ if (result != 16)
+ return -EINVAL;
+
+ result = cbaf_send_host_info(cbaf);
+ if (result < 0)
+ return result;
+ result = cbaf_cdid_get(cbaf);
+ if (result < 0)
+ return result;
+ return size;
+}
+static DEVICE_ATTR(wusb_chid, 0600, cbaf_wusb_chid_show, cbaf_wusb_chid_store);
+
+static ssize_t cbaf_wusb_host_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->host_name);
+}
+
+static ssize_t cbaf_wusb_host_name_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ result = sscanf(buf, "%63s", cbaf->host_name);
+ if (result != 1)
+ return -EINVAL;
+
+ return size;
+}
+static DEVICE_ATTR(wusb_host_name, 0600, cbaf_wusb_host_name_show,
+ cbaf_wusb_host_name_store);
+
+static ssize_t cbaf_wusb_host_band_groups_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->host_band_groups);
+}
+
+static ssize_t cbaf_wusb_host_band_groups_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+ u16 band_groups = 0;
+
+ result = sscanf(buf, "%04hx", &band_groups);
+ if (result != 1)
+ return -EINVAL;
+
+ cbaf->host_band_groups = band_groups;
+
+ return size;
+}
+
+static DEVICE_ATTR(wusb_host_band_groups, 0600,
+ cbaf_wusb_host_band_groups_show,
+ cbaf_wusb_host_band_groups_store);
+
+static const struct wusb_cbaf_device_info cbaf_device_info_defaults = {
+ .Length_hdr = WUSB_AR_Length,
+ .CDID_hdr = WUSB_AR_CDID,
+ .BandGroups_hdr = WUSB_AR_BandGroups,
+ .LangID_hdr = WUSB_AR_LangID,
+ .DeviceFriendlyName_hdr = WUSB_AR_DeviceFriendlyName,
+};
+
+static ssize_t cbaf_wusb_cdid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return sprintf(buf, "%16ph\n", cbaf->cdid.data);
+}
+
+static ssize_t cbaf_wusb_cdid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+ struct wusb_ckhdid cdid;
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx",
+ &cdid.data[0] , &cdid.data[1],
+ &cdid.data[2] , &cdid.data[3],
+ &cdid.data[4] , &cdid.data[5],
+ &cdid.data[6] , &cdid.data[7],
+ &cdid.data[8] , &cdid.data[9],
+ &cdid.data[10], &cdid.data[11],
+ &cdid.data[12], &cdid.data[13],
+ &cdid.data[14], &cdid.data[15]);
+ if (result != 16)
+ return -EINVAL;
+
+ cbaf->cdid = cdid;
+
+ return size;
+}
+static DEVICE_ATTR(wusb_cdid, 0600, cbaf_wusb_cdid_show, cbaf_wusb_cdid_store);
+
+static ssize_t cbaf_wusb_device_band_groups_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%04x\n", cbaf->device_band_groups);
+}
+
+static DEVICE_ATTR(wusb_device_band_groups, 0600,
+ cbaf_wusb_device_band_groups_show,
+ NULL);
+
+static ssize_t cbaf_wusb_device_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", cbaf->device_name);
+}
+static DEVICE_ATTR(wusb_device_name, 0600, cbaf_wusb_device_name_show, NULL);
+
+static const struct wusb_cbaf_cc_data cbaf_cc_data_defaults = {
+ .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
+ .AssociationTypeId = cpu_to_le16(AR_TYPE_WUSB),
+ .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+ .AssociationSubTypeId = cpu_to_le16(AR_TYPE_WUSB_ASSOCIATE),
+ .Length_hdr = WUSB_AR_Length,
+ .Length = cpu_to_le32(sizeof(struct wusb_cbaf_cc_data)),
+ .ConnectionContext_hdr = WUSB_AR_ConnectionContext,
+ .BandGroups_hdr = WUSB_AR_BandGroups,
+};
+
+static const struct wusb_cbaf_cc_data_fail cbaf_cc_data_fail_defaults = {
+ .AssociationTypeId_hdr = WUSB_AR_AssociationTypeId,
+ .AssociationSubTypeId_hdr = WUSB_AR_AssociationSubTypeId,
+ .Length_hdr = WUSB_AR_Length,
+ .AssociationStatus_hdr = WUSB_AR_AssociationStatus,
+};
+
+/*
+ * Send a new CC to the device.
+ */
+static int cbaf_cc_upload(struct cbaf *cbaf)
+{
+ int result;
+ struct device *dev = &cbaf->usb_iface->dev;
+ struct wusb_cbaf_cc_data *ccd;
+
+ ccd = cbaf->buffer;
+ *ccd = cbaf_cc_data_defaults;
+ ccd->CHID = cbaf->chid;
+ ccd->CDID = cbaf->cdid;
+ ccd->CK = cbaf->ck;
+ ccd->BandGroups = cpu_to_le16(cbaf->host_band_groups);
+
+ dev_dbg(dev, "Trying to upload CC:\n");
+ dev_dbg(dev, " CHID %16ph\n", ccd->CHID.data);
+ dev_dbg(dev, " CDID %16ph\n", ccd->CDID.data);
+ dev_dbg(dev, " Bandgroups 0x%04x\n", cbaf->host_band_groups);
+
+ result = usb_control_msg(
+ cbaf->usb_dev, usb_sndctrlpipe(cbaf->usb_dev, 0),
+ CBAF_REQ_SET_ASSOCIATION_RESPONSE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0x0201, cbaf->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ ccd, sizeof(*ccd), USB_CTRL_SET_TIMEOUT);
+
+ return result;
+}
+
+static ssize_t cbaf_wusb_ck_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ ssize_t result;
+ struct usb_interface *iface = to_usb_interface(dev);
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx",
+ &cbaf->ck.data[0] , &cbaf->ck.data[1],
+ &cbaf->ck.data[2] , &cbaf->ck.data[3],
+ &cbaf->ck.data[4] , &cbaf->ck.data[5],
+ &cbaf->ck.data[6] , &cbaf->ck.data[7],
+ &cbaf->ck.data[8] , &cbaf->ck.data[9],
+ &cbaf->ck.data[10], &cbaf->ck.data[11],
+ &cbaf->ck.data[12], &cbaf->ck.data[13],
+ &cbaf->ck.data[14], &cbaf->ck.data[15]);
+ if (result != 16)
+ return -EINVAL;
+
+ result = cbaf_cc_upload(cbaf);
+ if (result < 0)
+ return result;
+
+ return size;
+}
+static DEVICE_ATTR(wusb_ck, 0600, NULL, cbaf_wusb_ck_store);
+
+static struct attribute *cbaf_dev_attrs[] = {
+ &dev_attr_wusb_host_name.attr,
+ &dev_attr_wusb_host_band_groups.attr,
+ &dev_attr_wusb_chid.attr,
+ &dev_attr_wusb_cdid.attr,
+ &dev_attr_wusb_device_name.attr,
+ &dev_attr_wusb_device_band_groups.attr,
+ &dev_attr_wusb_ck.attr,
+ NULL,
+};
+
+static const struct attribute_group cbaf_dev_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = cbaf_dev_attrs,
+};
+
+static int cbaf_probe(struct usb_interface *iface,
+ const struct usb_device_id *id)
+{
+ struct cbaf *cbaf;
+ struct device *dev = &iface->dev;
+ int result = -ENOMEM;
+
+ cbaf = kzalloc(sizeof(*cbaf), GFP_KERNEL);
+ if (cbaf == NULL)
+ goto error_kzalloc;
+ cbaf->buffer = kmalloc(512, GFP_KERNEL);
+ if (cbaf->buffer == NULL)
+ goto error_kmalloc_buffer;
+
+ cbaf->buffer_size = 512;
+ cbaf->usb_dev = usb_get_dev(interface_to_usbdev(iface));
+ cbaf->usb_iface = usb_get_intf(iface);
+ result = cbaf_check(cbaf);
+ if (result < 0) {
+ dev_err(dev, "This device is not WUSB-CBAF compliant and is not supported yet.\n");
+ goto error_check;
+ }
+
+ result = sysfs_create_group(&dev->kobj, &cbaf_dev_attr_group);
+ if (result < 0) {
+ dev_err(dev, "Can't register sysfs attr group: %d\n", result);
+ goto error_create_group;
+ }
+ usb_set_intfdata(iface, cbaf);
+ return 0;
+
+error_create_group:
+error_check:
+ usb_put_intf(iface);
+ usb_put_dev(cbaf->usb_dev);
+ kfree(cbaf->buffer);
+error_kmalloc_buffer:
+ kfree(cbaf);
+error_kzalloc:
+ return result;
+}
+
+static void cbaf_disconnect(struct usb_interface *iface)
+{
+ struct cbaf *cbaf = usb_get_intfdata(iface);
+ struct device *dev = &iface->dev;
+ sysfs_remove_group(&dev->kobj, &cbaf_dev_attr_group);
+ usb_set_intfdata(iface, NULL);
+ usb_put_intf(iface);
+ usb_put_dev(cbaf->usb_dev);
+ kfree(cbaf->buffer);
+ /* paranoia: clean up crypto keys */
+ kzfree(cbaf);
+}
+
+static const struct usb_device_id cbaf_id_table[] = {
+ { USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, cbaf_id_table);
+
+static struct usb_driver cbaf_driver = {
+ .name = "wusb-cbaf",
+ .id_table = cbaf_id_table,
+ .probe = cbaf_probe,
+ .disconnect = cbaf_disconnect,
+};
+
+module_usb_driver(cbaf_driver);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB Cable Based Association");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/crypto.c b/drivers/staging/wusbcore/crypto.c
new file mode 100644
index 000000000000..d7d55ed19a98
--- /dev/null
+++ b/drivers/staging/wusbcore/crypto.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Ultra Wide Band
+ * AES-128 CCM Encryption
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * We don't do any encryption here; we use the Linux Kernel's AES-128
+ * crypto modules to construct keys and payload blocks in a way
+ * defined by WUSB1.0[6]. Check the errata, as typos are patched
+ * there.
+ *
+ * Thanks a zillion to John Keys for his help and clarifications over
+ * the designed-by-a-committee text.
+ *
+ * So the idea is that there is this basic Pseudo-Random-Function
+ * defined in WUSB1.0[6.5] which is the core of everything. It works
+ * by tweaking some blocks, AES crypting them and then xoring
+ * something else with them (this seems to be called CBC(AES) -- can
+ * you tell I know jack about crypto?). So we just funnel it into the
+ * Linux Crypto API.
+ *
+ * We leave a crypto test module so we can verify that vectors match,
+ * every now and then.
+ *
+ * Block size: 16 bytes -- AES seems to do things in 'block sizes'. I
+ * am learning a lot...
+ *
+ * Conveniently, some data structures that need to be
+ * funneled through AES are...16 bytes in size!
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include "../uwb/uwb.h"
+#include "include/wusb.h"
+
+static int debug_crypto_verify;
+
+module_param(debug_crypto_verify, int, 0);
+MODULE_PARM_DESC(debug_crypto_verify, "verify the key generation algorithms");
+
+static void wusb_key_dump(const void *buf, size_t len)
+{
+ print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_OFFSET, 16, 1,
+ buf, len, 0);
+}
+
+/*
+ * Block of data, as understood by AES-CCM
+ *
+ * The code assumes this structure is nothing but a 16 byte array
+ * (packed in a struct to avoid the common mess-ups that I usually make
+ * with arrays and to enforce type checking).
+ */
+struct aes_ccm_block {
+ u8 data[16];
+} __attribute__((packed));
+
+/*
+ * Counter-mode Blocks (WUSB1.0[6.4])
+ *
+ * According to CCM (or so it seems), for the purpose of calculating
+ * the MIC, the message is broken in N counter-mode blocks, B0, B1,
+ * ... BN.
+ *
+ * B0 contains flags, the CCM nonce and l(m).
+ *
+ * B1 contains l(a), the MAC header, the encryption offset and padding.
+ *
+ * If EO is nonzero, additional blocks are built from payload bytes
+ * until EO is exhausted (FIXME: padding to 16 bytes, I guess). The
+ * padding is not xmitted.
+ */
+
+/* WUSB1.0[T6.4] */
+struct aes_ccm_b0 {
+ u8 flags; /* 0x59, per CCM spec */
+ struct aes_ccm_nonce ccm_nonce;
+ __be16 lm;
+} __attribute__((packed));
+
+/* WUSB1.0[T6.5] */
+struct aes_ccm_b1 {
+ __be16 la;
+ u8 mac_header[10];
+ __le16 eo;
+ u8 security_reserved; /* This is always zero */
+ u8 padding; /* 0 */
+} __attribute__((packed));
+
+/*
+ * Encryption Blocks (WUSB1.0[6.4.4])
+ *
+ * CCM uses Ax blocks to generate a keystream with which the MIC and
+ * the message's payload are encoded. A0 always encrypts/decrypts the
+ * MIC. Ax (x>0) are used for the successive payload blocks.
+ *
+ * The x is the counter, and is increased for each block.
+ */
+struct aes_ccm_a {
+ u8 flags; /* 0x01, per CCM spec */
+ struct aes_ccm_nonce ccm_nonce;
+ __be16 counter; /* Value of x */
+} __attribute__((packed));
+
+/* Scratch space for MAC calculations. */
+struct wusb_mac_scratch {
+ struct aes_ccm_b0 b0;
+ struct aes_ccm_b1 b1;
+ struct aes_ccm_a ax;
+};
+
+/*
+ * CC-MAC function WUSB1.0[6.5]
+ *
+ * Take a data string and produce the encrypted CBC Counter-mode MIC
+ *
+ * Note the names for most function arguments are made to (more or
+ * less) match those used in the pseudo-function definition given in
+ * WUSB1.0[6.5].
+ *
+ * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
+ *
+ * @tfm_aes: AES cipher handle (initialized)
+ *
+ * @mic: buffer for placing the computed MIC (Message Integrity
+ * Code). This is exactly 8 bytes, and we expect the buffer to
+ * be at least eight bytes in length.
+ *
+ * @key: 128 bit symmetric key
+ *
+ * @n: CCM nonce
+ *
+ * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
+ * we use exactly 14 bytes).
+ *
+ * @b: data stream to be processed
+ *
+ * @blen: size of b...
+ *
+ * Still not very clear how this is done, but it looks like this: we
+ * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
+ * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
+ * take the payload and divide it in blocks (16 bytes), xor them with
+ * the previous crypto result (16 bytes) and crypt it, repeat the next
+ * block with the output of the previous one, rinse wash. So we use
+ * the CBC-MAC(AES) shash, that does precisely that. The IV (Initial
+ * Vector) is 16 bytes and is set to zero, so
+ *
+ * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
+ * using the 14 bytes of @a to fill up
+ * b1.{mac_header,e0,security_reserved,padding}.
+ *
+ * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
+ * l(m) is orthogonal, they bear no relationship, so it is not
+ * in conflict with the parameter's relation that
+ * WUSB1.0[6.4.2]) defines.
+ *
+ * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
+ * first errata released on 2005/07.
+ *
+ * NOTE: we need to clean IV to zero at each invocation to make sure
+ * we start with a fresh empty Initial Vector, so that the CBC
+ * works ok.
+ *
+ * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
+ * what sg[4] is for. Maybe there is a smarter way to do this.
+ */
+static int wusb_ccm_mac(struct crypto_shash *tfm_cbcmac,
+ struct wusb_mac_scratch *scratch,
+ void *mic,
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a, const void *b,
+ size_t blen)
+{
+ SHASH_DESC_ON_STACK(desc, tfm_cbcmac);
+ u8 iv[AES_BLOCK_SIZE];
+
+ /*
+ * These checks should be compile time optimized out
+ * ensure @a fills b1's mac_header and following fields
+ */
+ BUILD_BUG_ON(sizeof(*a) != sizeof(scratch->b1) - sizeof(scratch->b1.la));
+ BUILD_BUG_ON(sizeof(scratch->b0) != sizeof(struct aes_ccm_block));
+ BUILD_BUG_ON(sizeof(scratch->b1) != sizeof(struct aes_ccm_block));
+ BUILD_BUG_ON(sizeof(scratch->ax) != sizeof(struct aes_ccm_block));
+
+ /* Setup B0 */
+ scratch->b0.flags = 0x59; /* Format B0 */
+ scratch->b0.ccm_nonce = *n;
+ scratch->b0.lm = cpu_to_be16(0); /* WUSB1.0[6.5] sez l(m) is 0 */
+
+ /* Setup B1
+ *
+ * The WUSB spec is anything but clear! WUSB1.0[6.5]
+ * says that to initialize B1 from A with 'l(a) = blen +
+ * 14'--after clarification, it means to use A's contents
+ * for MAC Header, EO, sec reserved and padding.
+ */
+ scratch->b1.la = cpu_to_be16(blen + 14);
+ memcpy(&scratch->b1.mac_header, a, sizeof(*a));
+
+ desc->tfm = tfm_cbcmac;
+ crypto_shash_init(desc);
+ crypto_shash_update(desc, (u8 *)&scratch->b0, sizeof(scratch->b0) +
+ sizeof(scratch->b1));
+ crypto_shash_finup(desc, b, blen, iv);
+
+ /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
+ * The procedure is to AES crypt the A0 block and XOR the MIC
+ * Tag against it; we only do the first 8 bytes and place it
+ * directly in the destination buffer.
+ */
+ scratch->ax.flags = 0x01; /* as per WUSB 1.0 spec */
+ scratch->ax.ccm_nonce = *n;
+ scratch->ax.counter = 0;
+
+ /* reuse the CBC-MAC transform to perform the single block encryption */
+ crypto_shash_digest(desc, (u8 *)&scratch->ax, sizeof(scratch->ax),
+ (u8 *)&scratch->ax);
+
+ crypto_xor_cpy(mic, (u8 *)&scratch->ax, iv, 8);
+
+ return 8;
+}
+
+/*
+ * WUSB Pseudo Random Function (WUSB1.0[6.5])
+ *
+ * @b: buffer to the source data; cannot be a global or const local
+ * (will confuse the scatterlists)
+ */
+ssize_t wusb_prf(void *out, size_t out_size,
+ const u8 key[16], const struct aes_ccm_nonce *_n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen, size_t len)
+{
+ ssize_t result, bytes = 0, bitr;
+ struct aes_ccm_nonce n = *_n;
+ struct crypto_shash *tfm_cbcmac;
+ struct wusb_mac_scratch scratch;
+ u64 sfn = 0;
+ __le64 sfn_le;
+
+ tfm_cbcmac = crypto_alloc_shash("cbcmac(aes)", 0, 0);
+ if (IS_ERR(tfm_cbcmac)) {
+ result = PTR_ERR(tfm_cbcmac);
+ printk(KERN_ERR "E: can't load CBCMAC-AES: %d\n", (int)result);
+ goto error_alloc_cbcmac;
+ }
+
+ result = crypto_shash_setkey(tfm_cbcmac, key, AES_BLOCK_SIZE);
+ if (result < 0) {
+ printk(KERN_ERR "E: can't set CBCMAC-AES key: %d\n", (int)result);
+ goto error_setkey_cbcmac;
+ }
+
+ for (bitr = 0; bitr < (len + 63) / 64; bitr++) {
+ sfn_le = cpu_to_le64(sfn++);
+ memcpy(&n.sfn, &sfn_le, sizeof(n.sfn)); /* n.sfn++... */
+ result = wusb_ccm_mac(tfm_cbcmac, &scratch, out + bytes,
+ &n, a, b, blen);
+ if (result < 0)
+ goto error_ccm_mac;
+ bytes += result;
+ }
+ result = bytes;
+
+error_ccm_mac:
+error_setkey_cbcmac:
+ crypto_free_shash(tfm_cbcmac);
+error_alloc_cbcmac:
+ return result;
+}
+
+/* WUSB1.0[A.2] test vectors */
+static const u8 stv_hsmic_key[16] = {
+ 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
+ 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
+};
+
+static const struct aes_ccm_nonce stv_hsmic_n = {
+ .sfn = { 0 },
+ .tkid = { 0x76, 0x98, 0x01, },
+ .dest_addr = { .data = { 0xbe, 0x00 } },
+ .src_addr = { .data = { 0x76, 0x98 } },
+};
+
+/*
+ * Out-of-band MIC Generation verification code
+ *
+ */
+static int wusb_oob_mic_verify(void)
+{
+ int result;
+ u8 mic[8];
+ /* WUSB1.0[A.2] test vectors */
+ static const struct usb_handshake stv_hsmic_hs = {
+ .bMessageNumber = 2,
+ .bStatus = 00,
+ .tTKID = { 0x76, 0x98, 0x01 },
+ .bReserved = 00,
+ .CDID = { 0x30, 0x31, 0x32, 0x33, 0x34, 0x35,
+ 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b,
+ 0x3c, 0x3d, 0x3e, 0x3f },
+ .nonce = { 0x20, 0x21, 0x22, 0x23, 0x24, 0x25,
+ 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b,
+ 0x2c, 0x2d, 0x2e, 0x2f },
+ .MIC = { 0x75, 0x6a, 0x97, 0x51, 0x0c, 0x8c,
+ 0x14, 0x7b },
+ };
+ size_t hs_size;
+
+ result = wusb_oob_mic(mic, stv_hsmic_key, &stv_hsmic_n, &stv_hsmic_hs);
+ if (result < 0)
+ printk(KERN_ERR "E: WUSB OOB MIC test: failed: %d\n", result);
+ else if (memcmp(stv_hsmic_hs.MIC, mic, sizeof(mic))) {
+ printk(KERN_ERR "E: OOB MIC test: "
+ "mismatch between MIC result and WUSB1.0[A2]\n");
+ hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
+ printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
+ wusb_key_dump(&stv_hsmic_hs, hs_size);
+ printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
+ sizeof(stv_hsmic_n));
+ wusb_key_dump(&stv_hsmic_n, sizeof(stv_hsmic_n));
+ printk(KERN_ERR "E: MIC out:\n");
+ wusb_key_dump(mic, sizeof(mic));
+ printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
+ wusb_key_dump(stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
+ result = -EINVAL;
+ } else
+ result = 0;
+ return result;
+}
+
+/*
+ * Test vectors for Key derivation
+ *
+ * These come from WUSB1.0[6.5.1], the vectors in WUSB1.0[A.1]
+ * (errata corrected in 2005/07).
+ */
+static const u8 stv_key_a1[16] __attribute__ ((__aligned__(4))) = {
+ 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87,
+ 0x78, 0x69, 0x5a, 0x4b, 0x3c, 0x2d, 0x1e, 0x0f
+};
+
+static const struct aes_ccm_nonce stv_keydvt_n_a1 = {
+ .sfn = { 0 },
+ .tkid = { 0x76, 0x98, 0x01, },
+ .dest_addr = { .data = { 0xbe, 0x00 } },
+ .src_addr = { .data = { 0x76, 0x98 } },
+};
+
+static const struct wusb_keydvt_out stv_keydvt_out_a1 = {
+ .kck = {
+ 0x4b, 0x79, 0xa3, 0xcf, 0xe5, 0x53, 0x23, 0x9d,
+ 0xd7, 0xc1, 0x6d, 0x1c, 0x2d, 0xab, 0x6d, 0x3f
+ },
+ .ptk = {
+ 0xc8, 0x70, 0x62, 0x82, 0xb6, 0x7c, 0xe9, 0x06,
+ 0x7b, 0xc5, 0x25, 0x69, 0xf2, 0x36, 0x61, 0x2d
+ }
+};
+
+/*
+ * Perform a test to make sure we match the vectors defined in
+ * WUSB1.0[A.1] (Errata 2006/12).
+ */
+static int wusb_key_derive_verify(void)
+{
+ int result = 0;
+ struct wusb_keydvt_out keydvt_out;
+ /* These come from WUSB1.0[A.1] + 2006/12 errata */
+ static const struct wusb_keydvt_in stv_keydvt_in_a1 = {
+ .hnonce = {
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+ },
+ .dnonce = {
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f
+ }
+ };
+
+ result = wusb_key_derive(&keydvt_out, stv_key_a1, &stv_keydvt_n_a1,
+ &stv_keydvt_in_a1);
+ if (result < 0)
+ printk(KERN_ERR "E: WUSB key derivation test: "
+ "derivation failed: %d\n", result);
+ if (memcmp(&stv_keydvt_out_a1, &keydvt_out, sizeof(keydvt_out))) {
+ printk(KERN_ERR "E: WUSB key derivation test: "
+ "mismatch between key derivation result "
+ "and WUSB1.0[A1] Errata 2006/12\n");
+ printk(KERN_ERR "E: keydvt in: key\n");
+ wusb_key_dump(stv_key_a1, sizeof(stv_key_a1));
+ printk(KERN_ERR "E: keydvt in: nonce\n");
+ wusb_key_dump(&stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
+ printk(KERN_ERR "E: keydvt in: hnonce & dnonce\n");
+ wusb_key_dump(&stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
+ printk(KERN_ERR "E: keydvt out: KCK\n");
+ wusb_key_dump(&keydvt_out.kck, sizeof(keydvt_out.kck));
+ printk(KERN_ERR "E: keydvt out: PTK\n");
+ wusb_key_dump(&keydvt_out.ptk, sizeof(keydvt_out.ptk));
+ result = -EINVAL;
+ } else
+ result = 0;
+ return result;
+}
+
+/*
+ * Initialize crypto system
+ *
+ * FIXME: we do nothing now, other than verifying. Later on we'll
+ * cache the encryption stuff, so that's why we have a separate init.
+ */
+int wusb_crypto_init(void)
+{
+ int result;
+
+ if (debug_crypto_verify) {
+ result = wusb_key_derive_verify();
+ if (result < 0)
+ return result;
+ return wusb_oob_mic_verify();
+ }
+ return 0;
+}
+
+void wusb_crypto_exit(void)
+{
+ /* FIXME: free cached crypto transforms */
+}
diff --git a/drivers/staging/wusbcore/dev-sysfs.c b/drivers/staging/wusbcore/dev-sysfs.c
new file mode 100644
index 000000000000..67b0a4c412b2
--- /dev/null
+++ b/drivers/staging/wusbcore/dev-sysfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WUSB devices
+ * sysfs bindings
+ *
+ * Copyright (C) 2007 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * Get them out of the way...
+ */
+
+#include <linux/jiffies.h>
+#include <linux/ctype.h>
+#include <linux/workqueue.h>
+#include "wusbhc.h"
+
+static ssize_t wusb_disconnect_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct usb_device *usb_dev;
+ struct wusbhc *wusbhc;
+ unsigned command;
+ u8 port_idx;
+
+ if (sscanf(buf, "%u", &command) != 1)
+ return -EINVAL;
+ if (command == 0)
+ return size;
+ usb_dev = to_usb_device(dev);
+ wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+ if (wusbhc == NULL)
+ return -ENODEV;
+
+ mutex_lock(&wusbhc->mutex);
+ port_idx = wusb_port_no_to_idx(usb_dev->portnum);
+ __wusbhc_dev_disable(wusbhc, port_idx);
+ mutex_unlock(&wusbhc->mutex);
+ wusbhc_put(wusbhc);
+ return size;
+}
+static DEVICE_ATTR_WO(wusb_disconnect);
+
+static ssize_t wusb_cdid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t result;
+ struct wusb_dev *wusb_dev;
+
+ wusb_dev = wusb_dev_get_by_usb_dev(to_usb_device(dev));
+ if (wusb_dev == NULL)
+ return -ENODEV;
+ result = sprintf(buf, "%16ph\n", wusb_dev->cdid.data);
+ wusb_dev_put(wusb_dev);
+ return result;
+}
+static DEVICE_ATTR_RO(wusb_cdid);
+
+static ssize_t wusb_ck_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int result;
+ struct usb_device *usb_dev;
+ struct wusbhc *wusbhc;
+ struct wusb_ckhdid ck;
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx\n",
+ &ck.data[0] , &ck.data[1],
+ &ck.data[2] , &ck.data[3],
+ &ck.data[4] , &ck.data[5],
+ &ck.data[6] , &ck.data[7],
+ &ck.data[8] , &ck.data[9],
+ &ck.data[10], &ck.data[11],
+ &ck.data[12], &ck.data[13],
+ &ck.data[14], &ck.data[15]);
+ if (result != 16)
+ return -EINVAL;
+
+ usb_dev = to_usb_device(dev);
+ wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+ if (wusbhc == NULL)
+ return -ENODEV;
+ result = wusb_dev_4way_handshake(wusbhc, usb_dev->wusb_dev, &ck);
+ memzero_explicit(&ck, sizeof(ck));
+ wusbhc_put(wusbhc);
+ return result < 0 ? result : size;
+}
+static DEVICE_ATTR_WO(wusb_ck);
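+
+/*
+ * For illustration only (hypothetical sysfs path): user space feeds the
+ * 16-byte connection key to wusb_ck_store() as space-separated hex
+ * octets, e.g.
+ *
+ *	echo "00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff" \
+ *		> /sys/bus/usb/devices/2-1/wusb_ck
+ *
+ * which kicks off the 4-way handshake with that device.
+ */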
+
+static struct attribute *wusb_dev_attrs[] = {
+ &dev_attr_wusb_disconnect.attr,
+ &dev_attr_wusb_cdid.attr,
+ &dev_attr_wusb_ck.attr,
+ NULL,
+};
+
+static const struct attribute_group wusb_dev_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = wusb_dev_attrs,
+};
+
+int wusb_dev_sysfs_add(struct wusbhc *wusbhc, struct usb_device *usb_dev,
+ struct wusb_dev *wusb_dev)
+{
+ int result = sysfs_create_group(&usb_dev->dev.kobj,
+ &wusb_dev_attr_group);
+ struct device *dev = &usb_dev->dev;
+ if (result < 0)
+ dev_err(dev, "Cannot register WUSB-dev attributes: %d\n",
+ result);
+ return result;
+}
+
+void wusb_dev_sysfs_rm(struct wusb_dev *wusb_dev)
+{
+ struct usb_device *usb_dev = wusb_dev->usb_dev;
+ if (usb_dev)
+ sysfs_remove_group(&usb_dev->dev.kobj, &wusb_dev_attr_group);
+}
diff --git a/drivers/staging/wusbcore/devconnect.c b/drivers/staging/wusbcore/devconnect.c
new file mode 100644
index 000000000000..1170f8baf608
--- /dev/null
+++ b/drivers/staging/wusbcore/devconnect.c
@@ -0,0 +1,1085 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * Device Connect handling
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * FIXME: docs
+ * FIXME: this file needs to be broken up, it's grown too big
+ *
+ *
+ * WUSB1.0[7.1, 7.5.1]
+ *
+ * WUSB device connection is kind of messy. Some background:
+ *
+ * When a device wants to connect it scans the UWB radio channels
+ * looking for a WUSB Channel; a WUSB channel is defined by MMCs
+ * (Micro-scheduled Management Commands) [see
+ * Design-overview for more on this].
+ *
+ * So, device scans the radio, finds MMCs and thus a host and checks
+ * when the next DNTS is. It sends a Device Notification Connect
+ * (DN_Connect); the host picks it up (through nep.c and notif.c, ends
+ * up in wusb_devconnect_ack(), which creates a wusb_dev structure in
+ * wusbhc->port[port_number].wusb_dev), assigns an unauth address
+ * to the device (this means from 0x80 to 0xfe) and sends, in the MMC
+ * a Connect Ack Information Element (ConnAck IE).
+ *
+ * So now the device has a WUSB address. From now on, we use
+ * that to talk to it in the RPipes.
+ *
+ * ASSUMPTIONS:
+ *
+ * - We use as the device address the port number where it is
+ * connected (port 0 doesn't exist). For unauth, it is 128 + that.
+ *
+ * ROADMAP:
+ *
+ * This file contains the logic for doing that--entry points:
+ *
+ * wusb_devconnect_ack() Ack a device until _acked() called.
+ * Called by notif.c:wusb_handle_dn_connect()
+ * when a DN_Connect is received.
+ *
+ * wusb_devconnect_acked() Ack done, release resources.
+ *
+ * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn()
+ * for processing a DN_Alive pong from a device.
+ *
+ * wusb_handle_dn_disconnect() Called by notif.c:wusb_handle_dn() to
+ * process a disconnect request from a
+ * device.
+ *
+ * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when
+ * disabling a port.
+ *
+ * wusb_devconnect_create() Called when creating the host by
+ * lc.c:wusbhc_create().
+ *
+ * wusb_devconnect_destroy() Cleanup called removing the host. Called
+ * by lc.c:wusbhc_destroy().
+ *
+ * Each Wireless USB host maintains a list of DN_Connect requests
+ * (actually we maintain a list of pending Connect Acks, the
+ * wusbhc->ca_list).
+ *
+ * LIFE CYCLE OF port->wusb_dev
+ *
+ * Before the @wusbhc structure put()s the reference it owns for
+ * port->wusb_dev [and clean the wusb_dev pointer], it needs to
+ * lock @wusbhc->mutex.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/export.h>
+#include "wusbhc.h"
+
+static void wusbhc_devconnect_acked_work(struct work_struct *work);
+
+static void wusb_dev_free(struct wusb_dev *wusb_dev)
+{
+ kfree(wusb_dev);
+}
+
+static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
+{
+ struct wusb_dev *wusb_dev;
+
+ wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL);
+ if (wusb_dev == NULL)
+ goto err;
+
+ wusb_dev->wusbhc = wusbhc;
+
+ INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work);
+
+ return wusb_dev;
+err:
+ wusb_dev_free(wusb_dev);
+ return NULL;
+}
+
+
+/*
+ * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE
+ * properly so that it can be added to the MMC.
+ *
+ * We just get the @wusbhc->cack_list and fill out the first four entries
+ * or fewer (per spec WUSB1.0[7.5, before T7-38]). If the ConnectAck WUSB
+ * IE is not allocated, we alloc it.
+ *
+ * @wusbhc->mutex must be taken
+ */
+static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc)
+{
+ unsigned cnt;
+ struct wusb_dev *dev_itr;
+ struct wuie_connect_ack *cack_ie;
+
+ cack_ie = &wusbhc->cack_ie;
+ cnt = 0;
+ list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) {
+ cack_ie->blk[cnt].CDID = dev_itr->cdid;
+ cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr;
+ if (++cnt >= WUIE_ELT_MAX)
+ break;
+ }
+ cack_ie->hdr.bLength = sizeof(cack_ie->hdr)
+ + cnt * sizeof(cack_ie->blk[0]);
+}
+
+/*
+ * Register a new device that wants to connect
+ *
+ * A new device wants to connect, so we add it to the Connect-Ack
+ * list. We give it an address in the unauthorized range (bit 7 set);
+ * user space will have to drive authorization further on.
+ *
+ * @dev_addr: address to use for the device (which is also the port
+ * number).
+ *
+ * @wusbhc->mutex must be taken
+ */
+static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc,
+ struct wusb_dn_connect *dnc,
+ const char *pr_cdid, u8 port_idx)
+{
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+ int new_connection = wusb_dn_connect_new_connection(dnc);
+ u8 dev_addr;
+ int result;
+
+ /* Is it registered already? */
+ list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node)
+ if (!memcmp(&wusb_dev->cdid, &dnc->CDID,
+ sizeof(wusb_dev->cdid)))
+ return wusb_dev;
+ /* We don't have it, create an entry, register it */
+ wusb_dev = wusb_dev_alloc(wusbhc);
+ if (wusb_dev == NULL)
+ return NULL;
+ wusb_dev_init(wusb_dev);
+ wusb_dev->cdid = dnc->CDID;
+ wusb_dev->port_idx = port_idx;
+
+ /*
+ * Devices are always available within the cluster reservation
+ * and since the hardware will take the intersection of the
+ * per-device availability and the cluster reservation, the
+ * per-device availability can simply be set to always
+ * available.
+ */
+ bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS);
+
+ /* FIXME: handle reconnects instead of assuming connects are
+ always new. */
+ if (1 && new_connection == 0)
+ new_connection = 1;
+ if (new_connection) {
+ dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;
+
+ dev_info(dev, "Connecting new WUSB device to address %u, "
+ "port %u\n", dev_addr, port_idx);
+
+ result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr);
+ if (result < 0)
+ return NULL;
+ }
+ wusb_dev->entry_ts = jiffies;
+ list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list);
+ wusbhc->cack_count++;
+ wusbhc_fill_cack_ie(wusbhc);
+
+ return wusb_dev;
+}
+
+/*
+ * Remove a Connect-Ack context entry from the HCs view
+ *
+ * @wusbhc->mutex must be taken
+ */
+static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ list_del_init(&wusb_dev->cack_node);
+ wusbhc->cack_count--;
+ wusbhc_fill_cack_ie(wusbhc);
+}
+
+/*
+ * @wusbhc->mutex must be taken
+ */
+static
+void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ wusbhc_cack_rm(wusbhc, wusb_dev);
+ if (wusbhc->cack_count)
+ wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
+ else
+ wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr);
+}
+
+static void wusbhc_devconnect_acked_work(struct work_struct *work)
+{
+ struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev,
+ devconnect_acked_work);
+ struct wusbhc *wusbhc = wusb_dev->wusbhc;
+
+ mutex_lock(&wusbhc->mutex);
+ wusbhc_devconnect_acked(wusbhc, wusb_dev);
+ mutex_unlock(&wusbhc->mutex);
+
+ wusb_dev_put(wusb_dev);
+}
+
+/*
+ * Ack a device for connection
+ *
+ * FIXME: docs
+ *
+ * @pr_cdid: Printable CDID (hex); use @dnc->CDID for the real deal.
+ *
+ * So we get the connect ack IE (may have been allocated already),
+ * find an empty connect block, an empty virtual port, create an
+ * address with it (see below), make it an unauth addr [bit 7 set] and
+ * set the MMC.
+ *
+ * Addresses: because WUSB hosts have no downstream hubs, we can do a
+ * 1:1 mapping between 'port number' and device
+ * address. This simplifies many things, as during this
+ * initial connect phase the USB stack has no knowledge of
+ * the device and hasn't assigned an address yet--we know
+ * USB's choose_address() will use the same heuristics we
+ * use here, so we can assume which address will be assigned.
+ *
+ * USB stack always assigns address 1 to the root hub, so
+ * to the port number we add 2 (thus virtual port #0 is
+ * addr #2).
+ *
+ * @wusbhc shall be referenced
+ */
+static
+void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc,
+ const char *pr_cdid)
+{
+ int result;
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+ struct wusb_port *port;
+ unsigned idx;
+
+ mutex_lock(&wusbhc->mutex);
+
+ /* Check we are not handling it already */
+ for (idx = 0; idx < wusbhc->ports_max; idx++) {
+ port = wusb_port_by_idx(wusbhc, idx);
+ if (port->wusb_dev
+ && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0)
+ goto error_unlock;
+ }
+ /* Look up those fake ports we have for a free one */
+ for (idx = 0; idx < wusbhc->ports_max; idx++) {
+ port = wusb_port_by_idx(wusbhc, idx);
+ if ((port->status & USB_PORT_STAT_POWER)
+ && !(port->status & USB_PORT_STAT_CONNECTION))
+ break;
+ }
+ if (idx >= wusbhc->ports_max) {
+ dev_err(dev, "Host controller can't connect more devices "
+ "(%u already connected); device %s rejected\n",
+ wusbhc->ports_max, pr_cdid);
+ /* NOTE: we could send a WUIE_Disconnect here, but we haven't
+ * even acked, so the device will eventually time out the
+ * connection, right? */
+ goto error_unlock;
+ }
+
+ /* Make sure we are using no crypto on that "virtual port" */
+ wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0);
+
+ /* Grab a filled in Connect-Ack context, fill out the
+ * Connect-Ack Wireless USB IE, set the MMC */
+ wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx);
+ if (wusb_dev == NULL)
+ goto error_unlock;
+ result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
+ if (result < 0)
+ goto error_unlock;
+ /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do
+ * three for good measure */
+ msleep(3);
+ port->wusb_dev = wusb_dev;
+ port->status |= USB_PORT_STAT_CONNECTION;
+ port->change |= USB_PORT_STAT_C_CONNECTION;
+ /* Now the port status changed to connected; hub_wq will
+ * pick the change up and try to reset the port to bring it to
+ * the enabled state--so this process returns up to the stack
+ * and it calls back into wusbhc_rh_port_reset().
+ */
+error_unlock:
+ mutex_unlock(&wusbhc->mutex);
+ return;
+
+}
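+
+/*
+ * For illustration only -- hypothetical helpers, not used by the driver:
+ * the address rule applied above and inverted in wusbhc_find_dev_by_addr()
+ * below boils down to this (fake port index 0 gets unauthenticated
+ * address 0x82, i.e. address 2 with the unauth bit set):
+ */
+static inline u8 wusb_sketch_unauth_addr(u8 port_idx)
+{
+	/* root hub owns address 1, so port_idx 0 maps to address 2 */
+	return (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH;
+}
+
+static inline int wusb_sketch_addr_to_port_idx(u8 addr)
+{
+	/* drop the unauth bit and undo the +2 offset */
+	return (addr & ~0x80) - 2;
+}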
+
+/*
+ * Disconnect a Wireless USB device from its fake port
+ *
+ * Marks the port as disconnected so that hub_wq can pick up the change
+ * and drops our knowledge about the device.
+ *
+ * Assumes there is a device connected
+ *
+ * @port_index: zero based port number
+ *
+ * NOTE: @wusbhc->mutex is locked
+ *
+ * WARNING: From here it is not very safe to access anything hanging off
+ * wusb_dev
+ */
+static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
+ struct wusb_port *port)
+{
+ struct wusb_dev *wusb_dev = port->wusb_dev;
+
+ port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE
+ | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET
+ | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
+ port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE;
+ if (wusb_dev) {
+ dev_dbg(wusbhc->dev, "disconnecting device from port %d\n", wusb_dev->port_idx);
+ if (!list_empty(&wusb_dev->cack_node))
+ list_del_init(&wusb_dev->cack_node);
+ /* For the one in cack_add() */
+ wusb_dev_put(wusb_dev);
+ }
+ port->wusb_dev = NULL;
+
+ /* After a device disconnects, change the GTK (see [WUSB]
+ * section 6.2.11.2). */
+ if (wusbhc->active)
+ wusbhc_gtk_rekey(wusbhc);
+
+ /* The Wireless USB part has forgotten about the device already; now
+ * hub_wq's timer will pick up the disconnection and remove the USB
+ * device from the system
+ */
+}
+
+/*
+ * Refresh the list of keep alives to emit in the MMC
+ *
+ * We only publish the first four devices that have a coming timeout
+ * condition. Then when we are done processing those, we go for the
+ * next ones. We ignore the ones that have timed out already (they'll
+ * be purged).
+ *
+ * This might let devices at the end of the port array time out while
+ * we service the first ones...FIXME: come up with a better algorithm?
+ *
+ * Note we can't do much about MMC's ops errors; we hope next refresh
+ * will kind of handle it.
+ *
+ * NOTE: @wusbhc->mutex is locked
+ */
+static void __wusbhc_keep_alive(struct wusbhc *wusbhc)
+{
+ struct device *dev = wusbhc->dev;
+ unsigned cnt;
+ struct wusb_dev *wusb_dev;
+ struct wusb_port *wusb_port;
+ struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie;
+ unsigned keep_alives, old_keep_alives;
+
+ old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr);
+ keep_alives = 0;
+ for (cnt = 0;
+ keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max;
+ cnt++) {
+ unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout);
+
+ wusb_port = wusb_port_by_idx(wusbhc, cnt);
+ wusb_dev = wusb_port->wusb_dev;
+
+ if (wusb_dev == NULL)
+ continue;
+ if (wusb_dev->usb_dev == NULL)
+ continue;
+
+ if (time_after(jiffies, wusb_dev->entry_ts + tt)) {
+ dev_err(dev, "KEEPALIVE: device %u timed out\n",
+ wusb_dev->addr);
+ __wusbhc_dev_disconnect(wusbhc, wusb_port);
+ } else if (time_after(jiffies, wusb_dev->entry_ts + tt/3)) {
+ /* Approaching timeout cut off, need to refresh */
+ ie->bDeviceAddress[keep_alives++] = wusb_dev->addr;
+ }
+ }
+ if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */
+ ie->bDeviceAddress[keep_alives++] = 0x7f;
+ ie->hdr.bLength = sizeof(ie->hdr) +
+ keep_alives*sizeof(ie->bDeviceAddress[0]);
+ if (keep_alives > 0)
+ wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr);
+ else if (old_keep_alives != 0)
+ wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+}
+
+/*
+ * Do a run through all devices checking for timeouts
+ */
+static void wusbhc_keep_alive_run(struct work_struct *ws)
+{
+ struct delayed_work *dw = to_delayed_work(ws);
+ struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer);
+
+ mutex_lock(&wusbhc->mutex);
+ __wusbhc_keep_alive(wusbhc);
+ mutex_unlock(&wusbhc->mutex);
+
+ queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+ msecs_to_jiffies(wusbhc->trust_timeout / 2));
+}
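+
+/*
+ * For illustration only, assuming a trust timeout of 4000 ms: a device
+ * silent for more than 4000/3 ms (~1333 ms) gets a slot in the keep-alive
+ * IE, one silent for more than 4000 ms is disconnected, and the scan
+ * above re-runs every 4000/2 = 2000 ms.
+ */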
+
+/*
+ * Find the wusb_dev from its device address.
+ *
+ * The device can be found directly from the address (see
+ * wusbhc_cack_add() for where the device address is set to
+ * port_idx + 2), except when the address is zero.
+ */
+static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr)
+{
+ int p;
+
+ if (addr == 0xff) /* unconnected */
+ return NULL;
+
+ if (addr > 0) {
+ int port = (addr & ~0x80) - 2;
+ if (port < 0 || port >= wusbhc->ports_max)
+ return NULL;
+ return wusb_port_by_idx(wusbhc, port)->wusb_dev;
+ }
+
+ /* Look for the device with address 0. */
+ for (p = 0; p < wusbhc->ports_max; p++) {
+ struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev;
+ if (wusb_dev && wusb_dev->addr == addr)
+ return wusb_dev;
+ }
+ return NULL;
+}
+
+/*
+ * Handle a DN_Alive notification (WUSB1.0[7.6.1])
+ *
+ * This just updates the device activity timestamp and then refreshes
+ * the keep alive IE.
+ *
+ * @wusbhc shall be referenced and unlocked
+ */
+static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, u8 srcaddr)
+{
+ struct wusb_dev *wusb_dev;
+
+ mutex_lock(&wusbhc->mutex);
+ wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
+ if (wusb_dev == NULL) {
+ dev_dbg(wusbhc->dev, "ignoring DN_Alive from unconnected device %02x\n",
+ srcaddr);
+ } else {
+ wusb_dev->entry_ts = jiffies;
+ __wusbhc_keep_alive(wusbhc);
+ }
+ mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * Handle a DN_Connect notification (WUSB1.0[7.6.1])
+ *
+ * @wusbhc
+ * @pkt_hdr
+ * @size: Size of the buffer where the notification resides; if the
+ * notification data suggests there should be more data than
+ * available, an error will be signaled and the whole buffer
+ * consumed.
+ *
+ * @wusbhc->mutex shall be held
+ */
+static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc,
+ struct wusb_dn_hdr *dn_hdr,
+ size_t size)
+{
+ struct device *dev = wusbhc->dev;
+ struct wusb_dn_connect *dnc;
+ char pr_cdid[WUSB_CKHDID_STRSIZE];
+ static const char *beacon_behaviour[] = {
+ "reserved",
+ "self-beacon",
+ "directed-beacon",
+ "no-beacon"
+ };
+
+ if (size < sizeof(*dnc)) {
+ dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n",
+ size, sizeof(*dnc));
+ return;
+ }
+
+ dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
+ sprintf(pr_cdid, "%16ph", dnc->CDID.data);
+ dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n",
+ pr_cdid,
+ wusb_dn_connect_prev_dev_addr(dnc),
+ beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)],
+ wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect");
+ /* ACK the connect */
+ wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid);
+}
+
+/*
+ * Handle a DN_Disconnect notification (WUSB1.0[7.6.1])
+ *
+ * Device is going down -- do the disconnect.
+ *
+ * @wusbhc shall be referenced and unlocked
+ */
+static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, u8 srcaddr)
+{
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+
+ mutex_lock(&wusbhc->mutex);
+ wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
+ if (wusb_dev == NULL) {
+ dev_dbg(dev, "ignoring DN DISCONNECT from unconnected device %02x\n",
+ srcaddr);
+ } else {
+ dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n",
+ wusb_dev->addr);
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc,
+ wusb_dev->port_idx));
+ }
+ mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * Handle a Device Notification coming from a host
+ *
+ * The Device Notification comes from a host (HWA, DWA or WHCI)
+ * wrapped in a set of headers. Somebody else has peeled off those
+ * headers for us and we just get one Device Notification.
+ *
+ * Invalid DNs (e.g., too short) are discarded.
+ *
+ * @wusbhc shall be referenced
+ *
+ * FIXMES:
+ * - implement priorities as in WUSB1.0[Table 7-55]?
+ */
+void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr,
+ struct wusb_dn_hdr *dn_hdr, size_t size)
+{
+ struct device *dev = wusbhc->dev;
+
+ if (size < sizeof(struct wusb_dn_hdr)) {
+ dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
+ (int)size, (int)sizeof(struct wusb_dn_hdr));
+ return;
+ }
+ switch (dn_hdr->bType) {
+ case WUSB_DN_CONNECT:
+ wusbhc_handle_dn_connect(wusbhc, dn_hdr, size);
+ break;
+ case WUSB_DN_ALIVE:
+ wusbhc_handle_dn_alive(wusbhc, srcaddr);
+ break;
+ case WUSB_DN_DISCONNECT:
+ wusbhc_handle_dn_disconnect(wusbhc, srcaddr);
+ break;
+ case WUSB_DN_MASAVAILCHANGED:
+ case WUSB_DN_RWAKE:
+ case WUSB_DN_SLEEP:
+ /* FIXME: handle these DNs. */
+ break;
+ case WUSB_DN_EPRDY:
+ /* The hardware handles these. */
+ break;
+ default:
+ dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
+ dn_hdr->bType, (int)size, srcaddr);
+ }
+}
+EXPORT_SYMBOL_GPL(wusbhc_handle_dn);
+
+/*
+ * Disconnect a WUSB device from the cluster
+ *
+ * @wusbhc
+ * @port Fake port where the device is (wusbhc index, not USB port number).
+ *
+ * In Wireless USB, a disconnect is basically telling the device it is
+ * being disconnected and forgetting about it.
+ *
+ * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100
+ * ms and then keep going.
+ *
+ * We don't do much in case of error; we always pretend we disabled
+ * the port and disconnected the device. If physically the request
+ * didn't get there (many things can fail in the way there), the stack
+ * will reject the device's communication attempts.
+ *
+ * @wusbhc should be refcounted and locked
+ */
+void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx)
+{
+ int result;
+ struct device *dev = wusbhc->dev;
+ struct wusb_dev *wusb_dev;
+ struct wuie_disconnect *ie;
+
+ wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+ if (wusb_dev == NULL) {
+ /* reset no device? ignore */
+ dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
+ port_idx);
+ return;
+ }
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
+
+ ie = kzalloc(sizeof(*ie), GFP_KERNEL);
+ if (ie == NULL)
+ return;
+ ie->hdr.bLength = sizeof(*ie);
+ ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
+ ie->bDeviceAddress = wusb_dev->addr;
+ result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
+ if (result < 0)
+ dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
+ else {
+ /* At least 6 MMCs, assuming at least 1 MMC per zone. */
+ msleep(7*4);
+ wusbhc_mmcie_rm(wusbhc, &ie->hdr);
+ }
+ kfree(ie);
+}
+
+/*
+ * Walk over the BOS descriptor, verify and grok it
+ *
+ * @usb_dev: referenced
+ * @wusb_dev: referenced and unlocked
+ *
+ * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a
+ * "flexible" way to wrap all kinds of descriptors inside an standard
+ * descriptor (wonder why they didn't use normal descriptors,
+ * btw). Not like they lack code.
+ *
+ * At the end we go to look for the WUSB Device Capabilities
+ * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor
+ * that is part of the BOS descriptor set. That tells us what the
+ * device supports (dual role, beacon type, UWB PHY rates).
+ */
+static int wusb_dev_bos_grok(struct usb_device *usb_dev,
+ struct wusb_dev *wusb_dev,
+ struct usb_bos_descriptor *bos, size_t desc_size)
+{
+ ssize_t result;
+ struct device *dev = &usb_dev->dev;
+ void *itr, *top;
+
+ /* Walk over BOS capabilities, verify them */
+ itr = (void *)bos + sizeof(*bos);
+ top = itr + desc_size - sizeof(*bos);
+ while (itr < top) {
+ struct usb_dev_cap_header *cap_hdr = itr;
+ size_t cap_size;
+ u8 cap_type;
+ if (top - itr < sizeof(*cap_hdr)) {
+ dev_err(dev, "Device BUG? premature end of BOS header "
+ "data [offset 0x%02x]: only %zu bytes left\n",
+ (int)(itr - (void *)bos), top - itr);
+ result = -ENOSPC;
+ goto error_bad_cap;
+ }
+ cap_size = cap_hdr->bLength;
+ cap_type = cap_hdr->bDevCapabilityType;
+ if (cap_size == 0)
+ break;
+ if (cap_size > top - itr) {
+ dev_err(dev, "Device BUG? premature end of BOS data "
+ "[offset 0x%02x cap %02x %zu bytes]: "
+ "only %zu bytes left\n",
+ (int)(itr - (void *)bos),
+ cap_type, cap_size, top - itr);
+ result = -EBADF;
+ goto error_bad_cap;
+ }
+ switch (cap_type) {
+ case USB_CAP_TYPE_WIRELESS_USB:
+ if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
+ dev_err(dev, "Device BUG? WUSB Capability "
+ "descriptor is %zu bytes vs %zu "
+ "needed\n", cap_size,
+ sizeof(*wusb_dev->wusb_cap_descr));
+ else
+ wusb_dev->wusb_cap_descr = itr;
+ break;
+ default:
+ dev_err(dev, "BUG? Unknown BOS capability 0x%02x "
+ "(%zu bytes) at offset 0x%02x\n", cap_type,
+ cap_size, (int)(itr - (void *)bos));
+ }
+ itr += cap_size;
+ }
+ result = 0;
+error_bad_cap:
+ return result;
+}
+
+/*
+ * Add information from the BOS descriptors to the device
+ *
+ * @usb_dev: referenced
+ * @wusb_dev: referenced and unlocked
+ *
+ * So what we do is allocate 32 bytes for the BOS descriptor; read
+ * the first four bytes, which include the wTotalLength field
+ * (WUSB1.0[T7-26]), and if the whole set fits in those 32 bytes,
+ * read it all. If not, we realloc to the reported size and re-read.
+ *
+ * Then we call the groking function, that will fill up
+ * wusb_dev->wusb_cap_descr, which is what we'll need later on.
+ */
+static int wusb_dev_bos_add(struct usb_device *usb_dev,
+ struct wusb_dev *wusb_dev)
+{
+ ssize_t result;
+ struct device *dev = &usb_dev->dev;
+ struct usb_bos_descriptor *bos;
+ size_t alloc_size = 32, desc_size = 4;
+
+ bos = kmalloc(alloc_size, GFP_KERNEL);
+ if (bos == NULL)
+ return -ENOMEM;
+ result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
+ if (result < 4) {
+ dev_err(dev, "Can't get BOS descriptor or too short: %zd\n",
+ result);
+ goto error_get_descriptor;
+ }
+ desc_size = le16_to_cpu(bos->wTotalLength);
+ if (desc_size >= alloc_size) {
+ kfree(bos);
+ alloc_size = desc_size;
+ bos = kmalloc(alloc_size, GFP_KERNEL);
+ if (bos == NULL)
+ return -ENOMEM;
+ }
+ result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size);
+ if (result < 0 || result != desc_size) {
+ dev_err(dev, "Can't get BOS descriptor or too short (need "
+ "%zu bytes): %zd\n", desc_size, result);
+ goto error_get_descriptor;
+ }
+ if (result < sizeof(*bos)
+ || le16_to_cpu(bos->wTotalLength) != desc_size) {
+ dev_err(dev, "Can't get BOS descriptor or too short (need "
+ "%zu bytes): %zd\n", desc_size, result);
+ goto error_get_descriptor;
+ }
+
+ result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
+ if (result < 0)
+ goto error_bad_bos;
+ wusb_dev->bos = bos;
+ return 0;
+
+error_bad_bos:
+error_get_descriptor:
+ kfree(bos);
+ wusb_dev->wusb_cap_descr = NULL;
+ return result;
+}
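+
+/*
+ * For illustration only: with the 32-byte first pass above, a BOS set
+ * reporting e.g. wTotalLength = 42 does not fit, so the buffer is freed,
+ * reallocated at 42 bytes and the whole descriptor set is read again.
+ */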
+
+static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev)
+{
+ kfree(wusb_dev->bos);
+ wusb_dev->wusb_cap_descr = NULL;
+};
+
+/*
+ * USB stack's device addition Notifier Callback
+ *
+ * Called from drivers/usb/core/hub.c when a new device is added; we
+ * use this hook to perform certain WUSB specific setup work on the
+ * new device. As well, it is the first time we can connect the
+ * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a
+ * reference that we'll drop.
+ *
+ * First we need to determine if the device is a WUSB device (else we
+ * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS)
+ * [FIXME: maybe we'd need something more definitive]. If so, we track
+ * its usb_bus and from there, the WUSB HC.
+ *
+ * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we
+ * get the wusbhc for the device.
+ *
+ * We have a reference on @usb_dev (as we are called at the end of its
+ * enumeration).
+ *
+ * NOTE: @usb_dev locked
+ */
+static void wusb_dev_add_ncb(struct usb_device *usb_dev)
+{
+ int result = 0;
+ struct wusb_dev *wusb_dev;
+ struct wusbhc *wusbhc;
+ struct device *dev = &usb_dev->dev;
+ u8 port_idx;
+
+ if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
+ return; /* skip non wusb and wusb RHs */
+
+ usb_set_device_state(usb_dev, USB_STATE_UNAUTHENTICATED);
+
+ wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+ if (wusbhc == NULL)
+ goto error_nodev;
+ mutex_lock(&wusbhc->mutex);
+ wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
+ port_idx = wusb_port_no_to_idx(usb_dev->portnum);
+ mutex_unlock(&wusbhc->mutex);
+ if (wusb_dev == NULL)
+ goto error_nodev;
+ wusb_dev->usb_dev = usb_get_dev(usb_dev);
+ usb_dev->wusb_dev = wusb_dev_get(wusb_dev);
+ result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev);
+ if (result < 0) {
+ dev_err(dev, "Cannot enable security: %d\n", result);
+ goto error_sec_add;
+ }
+ /* Now query the device for its BOS and attach it to wusb_dev */
+ result = wusb_dev_bos_add(usb_dev, wusb_dev);
+ if (result < 0) {
+ dev_err(dev, "Cannot get BOS descriptors: %d\n", result);
+ goto error_bos_add;
+ }
+ result = wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev);
+ if (result < 0)
+ goto error_add_sysfs;
+out:
+ wusb_dev_put(wusb_dev);
+ wusbhc_put(wusbhc);
+error_nodev:
+ return;
+
+error_add_sysfs:
+ wusb_dev_bos_rm(wusb_dev);
+error_bos_add:
+ wusb_dev_sec_rm(wusb_dev);
+error_sec_add:
+ mutex_lock(&wusbhc->mutex);
+ __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
+ mutex_unlock(&wusbhc->mutex);
+ goto out;
+}
+
+/*
+ * Undo all the steps done at connection by the notifier callback
+ *
+ * NOTE: @usb_dev locked
+ */
+static void wusb_dev_rm_ncb(struct usb_device *usb_dev)
+{
+ struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+
+ if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
+ return; /* skip non wusb and wusb RHs */
+
+ wusb_dev_sysfs_rm(wusb_dev);
+ wusb_dev_bos_rm(wusb_dev);
+ wusb_dev_sec_rm(wusb_dev);
+ wusb_dev->usb_dev = NULL;
+ usb_dev->wusb_dev = NULL;
+ wusb_dev_put(wusb_dev);
+ usb_put_dev(usb_dev);
+}
+
+/*
+ * Handle notifications from the USB stack (notifier call back)
+ *
+ * This is called when the USB stack does a
+ * usb_{bus,device}_{add,remove}() so we can do WUSB specific
+ * handling. It is called with [for the case of
+ * USB_DEVICE_{ADD,REMOVE} with the usb_dev locked.
+ */
+int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
+ void *priv)
+{
+ int result = NOTIFY_OK;
+
+ switch (val) {
+ case USB_DEVICE_ADD:
+ wusb_dev_add_ncb(priv);
+ break;
+ case USB_DEVICE_REMOVE:
+ wusb_dev_rm_ncb(priv);
+ break;
+ case USB_BUS_ADD:
+ /* ignore (for now) */
+ case USB_BUS_REMOVE:
+ break;
+ default:
+ WARN_ON(1);
+ result = NOTIFY_BAD;
+ }
+ return result;
+}
+
+/*
+ * Return a referenced wusb_dev given a @wusbhc and @usb_dev
+ */
+struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc,
+ struct usb_device *usb_dev)
+{
+ struct wusb_dev *wusb_dev;
+ u8 port_idx;
+
+ port_idx = wusb_port_no_to_idx(usb_dev->portnum);
+ BUG_ON(port_idx > wusbhc->ports_max);
+ wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+ if (wusb_dev != NULL) /* may be NULL if the device is already gone */
+ wusb_dev_get(wusb_dev);
+ return wusb_dev;
+}
+EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev);
+
+void wusb_dev_destroy(struct kref *_wusb_dev)
+{
+ struct wusb_dev *wusb_dev = container_of(_wusb_dev, struct wusb_dev, refcnt);
+
+ list_del_init(&wusb_dev->cack_node);
+ wusb_dev_free(wusb_dev);
+}
+EXPORT_SYMBOL_GPL(wusb_dev_destroy);
+
+/*
+ * Create all the device connect handling infrastructure
+ *
+ * This is basically the device info array, Connect Acknowledgement
+ * (cack) lists, keep-alive timers (and delayed work thread).
+ */
+int wusbhc_devconnect_create(struct wusbhc *wusbhc)
+{
+ wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
+ wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr);
+ INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run);
+
+ wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK;
+ wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr);
+ INIT_LIST_HEAD(&wusbhc->cack_list);
+
+ return 0;
+}
+
+/*
+ * Release all resources taken by the devconnect stuff
+ */
+void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
+{
+ /* no op */
+}
+
+/*
+ * wusbhc_devconnect_start - start accepting device connections
+ * @wusbhc: the WUSB HC
+ *
+ * Sets the Host Info IE to accept all new connections.
+ *
+ * FIXME: This also enables the keep alives but this is not necessary
+ * until there are connected and authenticated devices.
+ */
+int wusbhc_devconnect_start(struct wusbhc *wusbhc)
+{
+ struct device *dev = wusbhc->dev;
+ struct wuie_host_info *hi;
+ int result;
+
+ hi = kzalloc(sizeof(*hi), GFP_KERNEL);
+ if (hi == NULL)
+ return -ENOMEM;
+
+ hi->hdr.bLength = sizeof(*hi);
+ hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
+ hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL);
+ hi->CHID = wusbhc->chid;
+ result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr);
+ if (result < 0) {
+ dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
+ goto error_mmcie_set;
+ }
+ wusbhc->wuie_host_info = hi;
+
+ queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+ msecs_to_jiffies(wusbhc->trust_timeout / 2));
+
+ return 0;
+
+error_mmcie_set:
+ kfree(hi);
+ return result;
+}
+
+/*
+ * wusbhc_devconnect_stop - stop managing connected devices
+ * @wusbhc: the WUSB HC
+ *
+ * Disconnects any devices still connected, stops the keep alives and
+ * removes the Host Info IE.
+ */
+void wusbhc_devconnect_stop(struct wusbhc *wusbhc)
+{
+ int i;
+
+ mutex_lock(&wusbhc->mutex);
+ for (i = 0; i < wusbhc->ports_max; i++) {
+ if (wusbhc->port[i].wusb_dev)
+ __wusbhc_dev_disconnect(wusbhc, &wusbhc->port[i]);
+ }
+ mutex_unlock(&wusbhc->mutex);
+
+ cancel_delayed_work_sync(&wusbhc->keep_alive_timer);
+ wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr);
+ kfree(wusbhc->wuie_host_info);
+ wusbhc->wuie_host_info = NULL;
+}
+
+/*
+ * wusb_set_dev_addr - set the WUSB device address used by the host
+ * @wusbhc: the WUSB HC the device is connected to
+ * @wusb_dev: the WUSB device
+ * @addr: new device address
+ */
+int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr)
+{
+ int result;
+
+ wusb_dev->addr = addr;
+ result = wusbhc->dev_info_set(wusbhc, wusb_dev);
+ if (result < 0)
+ dev_err(wusbhc->dev, "device %d: failed to set device "
+ "address\n", wusb_dev->port_idx);
+ else
+ dev_info(wusbhc->dev, "device %d: %s addr %u\n",
+ wusb_dev->port_idx,
+ (addr & WUSB_DEV_ADDR_UNAUTH) ? "unauth" : "auth",
+ wusb_dev->addr);
+
+ return result;
+}
diff --git a/drivers/staging/wusbcore/host/Kconfig b/drivers/staging/wusbcore/host/Kconfig
new file mode 100644
index 000000000000..9a73f9360a08
--- /dev/null
+++ b/drivers/staging/wusbcore/host/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config USB_WHCI_HCD
+ tristate "Wireless USB Host Controller Interface (WHCI) driver"
+ depends on USB_PCI && USB && UWB
+ select USB_WUSB
+ select UWB_WHCI
+ help
+ A driver for PCI-based Wireless USB Host Controllers that are
+ compliant with the WHCI specification.
+
+ To compile this driver as a module, choose M here: the module
+ will be called "whci-hcd".
+
+config USB_HWA_HCD
+ tristate "Host Wire Adapter (HWA) driver"
+ depends on USB && UWB
+ select USB_WUSB
+ select UWB_HWA
+ help
+ This driver enables you to connect Wireless USB devices to
+ your system using a Host Wire Adapter USB dongle. This is a
+ UWB Radio Controller and WUSB Host Controller connected to
+ your machine via USB (specified in WUSB1.0).
+
+ To compile this driver as a module, choose M here: the module
+ will be called "hwa-hc".
+
diff --git a/drivers/staging/wusbcore/host/Makefile b/drivers/staging/wusbcore/host/Makefile
new file mode 100644
index 000000000000..d65ee8a73e21
--- /dev/null
+++ b/drivers/staging/wusbcore/host/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_USB_WHCI_HCD) += whci/
+obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
diff --git a/drivers/staging/wusbcore/host/hwa-hc.c b/drivers/staging/wusbcore/host/hwa-hc.c
new file mode 100644
index 000000000000..8d959e91fe27
--- /dev/null
+++ b/drivers/staging/wusbcore/host/hwa-hc.c
@@ -0,0 +1,875 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Host Wire Adapter:
+ * Driver glue, HWA-specific functions, bridges to WAHC and WUSBHC
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * The HWA driver is a simple layer that forwards requests to the WAHC
+ * (Wire Adapter Host Controller) or WUSBHC (Wireless USB Host
+ * Controller) layers.
+ *
+ * Host Wire Adapter is the 'WUSB 1.0 standard' name for Wireless-USB
+ * Host Controller that is connected to your system via USB (a USB
+ * dongle that implements a USB host...). There is also a Device Wire
+ * Adapter, DWA (Wireless USB hub), that uses the same mechanism for
+ * transferring data (it is after all a USB host connected via
+ * Wireless USB), so we have a common layer called Wire Adapter Host
+ * Controller that does all the hard work. The WUSBHC (Wireless USB
+ * Host Controller) is the part common to WUSB Host Controllers, the
+ * HWA and the PCI-based one, that is implemented following the WHCI
+ * spec. All these layers are implemented in ../wusbcore.
+ *
+ * The main functions are hwahc_op_urb_{en,de}queue(), that pass the
+ * job of converting a URB into Wire Adapter transfers down to the
+ * common layer.
+ *
+ * Entry points:
+ *
+ * hwahc_driver_*() Driver initialization, registration and
+ * teardown.
+ *
+ * hwahc_probe() New device came up, create an instance for
+ * it [from device enumeration].
+ *
+ * hwahc_disconnect() Remove device instance [from device
+ * enumeration].
+ *
+ * [__]hwahc_op_*() Host-Wire-Adapter specific functions for
+ * starting/stopping/etc (some might also apply to
+ * the DWA).
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/completion.h>
+#include "../wa-hc.h"
+#include "../wusbhc.h"
+
+struct hwahc {
+ struct wusbhc wusbhc; /* has to be 1st */
+ struct wahc wa;
+};
+
+/*
+ * FIXME should be wusbhc
+ *
+ * NOTE: we need to cache the Cluster ID because later...there is no
+ * way to get it :)
+ */
+static int __hwahc_set_cluster_id(struct hwahc *hwahc, u8 cluster_id)
+{
+ int result;
+ struct wusbhc *wusbhc = &hwahc->wusbhc;
+ struct wahc *wa = &hwahc->wa;
+ struct device *dev = &wa->usb_iface->dev;
+
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_CLUSTER_ID,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ cluster_id,
+ wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (result < 0)
+ dev_err(dev, "Cannot set WUSB Cluster ID to 0x%02x: %d\n",
+ cluster_id, result);
+ else
+ wusbhc->cluster_id = cluster_id;
+ dev_info(dev, "Wireless USB Cluster ID set to 0x%02x\n", cluster_id);
+ return result;
+}
+
+static int __hwahc_op_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+
+ return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_NUM_DNTS,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ interval << 8 | slots,
+ wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
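+
+/*
+ * For illustration only: wValue above packs both parameters, e.g.
+ * interval = 4 and slots = 4 yield interval << 8 | slots = 0x0404.
+ */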
+
+/*
+ * Reset a WUSB host controller and wait for the reset to complete.
+ *
+ * @usb_hcd: Pointer to WUSB Host Controller instance.
+ *
+ */
+static int hwahc_op_reset(struct usb_hcd *usb_hcd)
+{
+ int result;
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct device *dev = &hwahc->wa.usb_iface->dev;
+
+ mutex_lock(&wusbhc->mutex);
+ wa_nep_disarm(&hwahc->wa);
+ result = __wa_set_feature(&hwahc->wa, WA_RESET);
+ if (result < 0) {
+ dev_err(dev, "error commanding HC to reset: %d\n", result);
+ goto error_unlock;
+ }
+ result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
+ if (result < 0) {
+ dev_err(dev, "error waiting for HC to reset: %d\n", result);
+ goto error_unlock;
+ }
+error_unlock:
+ mutex_unlock(&wusbhc->mutex);
+ return result;
+}
+
+/*
+ * FIXME: break this function up
+ */
+static int hwahc_op_start(struct usb_hcd *usb_hcd)
+{
+ u8 addr;
+ int result;
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ result = -ENOSPC;
+ mutex_lock(&wusbhc->mutex);
+ addr = wusb_cluster_id_get();
+ if (addr == 0)
+ goto error_cluster_id_get;
+ result = __hwahc_set_cluster_id(hwahc, addr);
+ if (result < 0)
+ goto error_set_cluster_id;
+
+ usb_hcd->uses_new_polling = 1;
+ set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
+ usb_hcd->state = HC_STATE_RUNNING;
+
+ /*
+ * prevent USB core from suspending the root hub since
+ * bus_suspend and bus_resume are not yet supported.
+ */
+ pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev);
+
+ result = 0;
+out:
+ mutex_unlock(&wusbhc->mutex);
+ return result;
+
+error_set_cluster_id:
+ wusb_cluster_id_put(addr);
+error_cluster_id_get:
+ goto out;
+
+}
+
+/*
+ * No need to abort pipes, as when this is called, all the children
+ * have been disconnected and that has done it [through
+ * usb_disable_interface() -> usb_disable_endpoint() ->
+ * hwahc_op_endpoint_disable() -> rpipe_ep_disable()].
+ */
+static void hwahc_op_stop(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+
+ mutex_lock(&wusbhc->mutex);
+ wusb_cluster_id_put(wusbhc->cluster_id);
+ mutex_unlock(&wusbhc->mutex);
+}
+
+static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+
+ /*
+ * We cannot query the HWA for the WUSB time since that requires sending
+ * a synchronous URB and this function can be called in_interrupt.
+ * Instead, query the USB frame number for our parent and use that.
+ */
+ return usb_get_current_frame_number(wa->usb_dev);
+}
+
+static int hwahc_op_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+ gfp_t gfp)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ return wa_urb_enqueue(&hwahc->wa, urb->ep, urb, gfp);
+}
+
+static int hwahc_op_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb,
+ int status)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ return wa_urb_dequeue(&hwahc->wa, urb, status);
+}
+
+/*
+ * Release resources allocated for an endpoint
+ *
+ * If there is an associated rpipe to this endpoint, go ahead and put it.
+ */
+static void hwahc_op_endpoint_disable(struct usb_hcd *usb_hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ rpipe_ep_disable(&hwahc->wa, ep);
+}
+
+static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
+{
+ int result;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct device *dev = &hwahc->wa.usb_iface->dev;
+
+ result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
+ if (result < 0) {
+ dev_err(dev, "error commanding HC to start: %d\n", result);
+ goto error_stop;
+ }
+ result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
+ if (result < 0) {
+ dev_err(dev, "error waiting for HC to start: %d\n", result);
+ goto error_stop;
+ }
+ result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "cannot listen to notifications: %d\n", result);
+ goto error_stop;
+ }
+ /*
+ * If WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS is set,
+ * disable transfer notifications.
+ */
+ if (hwahc->wa.quirks &
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS) {
+ struct usb_host_interface *cur_altsetting =
+ hwahc->wa.usb_iface->cur_altsetting;
+
+ result = usb_control_msg(hwahc->wa.usb_dev,
+ usb_sndctrlpipe(hwahc->wa.usb_dev, 0),
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ WA_REQ_ALEREON_FEATURE_SET,
+ cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ /*
+ * If we successfully sent the control message, start DTI here
+ * because no transfer notifications will be received which is
+ * where DTI is normally started.
+ */
+ if (result == 0)
+ result = wa_dti_start(&hwahc->wa);
+ else
+ result = 0; /* OK. Continue normally. */
+
+ if (result < 0) {
+ dev_err(dev, "cannot start DTI: %d\n", result);
+ goto error_dti_start;
+ }
+ }
+
+ return result;
+
+error_dti_start:
+ wa_nep_disarm(&hwahc->wa);
+error_stop:
+ __wa_clear_feature(&hwahc->wa, WA_ENABLE);
+ return result;
+}
+
+static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ int ret;
+
+ ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_CHAN_STOP,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ delay * 1000,
+ iface_no,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (ret == 0)
+ msleep(delay);
+
+ wa_nep_disarm(&hwahc->wa);
+ __wa_stop(&hwahc->wa);
+}
+
+/*
+ * Set the UWB MAS allocation for the WUSB cluster
+ *
+ * @stream_index: stream to use (-1 for cancelling the allocation)
+ * @mas: mas bitmap to use
+ */
+static int __hwahc_op_bwa_set(struct wusbhc *wusbhc, s8 stream_index,
+ const struct uwb_mas_bm *mas)
+{
+ int result;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ struct device *dev = &wa->usb_iface->dev;
+ u8 mas_le[UWB_NUM_MAS/8];
+
+ /* Set the stream index */
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_STREAM_IDX,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ stream_index,
+ wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "Cannot set WUSB stream index: %d\n", result);
+ goto out;
+ }
+ uwb_mas_bm_copy_le(mas_le, mas);
+ /* Set the MAS allocation */
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_WUSB_MAS,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ mas_le, 32, USB_CTRL_SET_TIMEOUT);
+ if (result < 0)
+ dev_err(dev, "Cannot set WUSB MAS allocation: %d\n", result);
+out:
+ return result;
+}
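+
+/*
+ * For illustration only: mas_le[] above is UWB_NUM_MAS / 8 = 256 / 8 = 32
+ * bytes, which is why the MAS allocation transfer length is the literal 32.
+ */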
+
+/*
+ * Add an IE to the host's MMC
+ *
+ * @interval: See WUSB1.0[8.5.3.1]
+ * @repeat_cnt: See WUSB1.0[8.5.3.1]
+ * @handle: See WUSB1.0[8.5.3.1]
+ * @wuie: Pointer to the header of the WUSB IE data to add.
+ * MUST BE allocated in a kmalloc buffer (no stack or
+ * vmalloc).
+ *
+ * NOTE: the format of the WUSB IEs for MMCs is different from the
+ * normal MBOA MAC IEs (IE Id + Length in MBOA MAC vs. Length +
+ * Id in WUSB IEs). Standards...you gotta love'em.
+ */
+static int __hwahc_op_mmcie_add(struct wusbhc *wusbhc, u8 interval,
+ u8 repeat_cnt, u8 handle,
+ struct wuie_hdr *wuie)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+
+ return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_ADD_MMC_IE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ interval << 8 | repeat_cnt,
+ handle << 8 | iface_no,
+ wuie, wuie->bLength, USB_CTRL_SET_TIMEOUT);
+}
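+
+/*
+ * For illustration only: the "Length + Id" layout mentioned above means
+ * @wuie starts with bLength followed by bIEIdentifier, and bLength counts
+ * the whole IE (header included), which is why it doubles as the transfer
+ * length above.
+ */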
+
+/*
+ * Remove an IE to the host's MMC
+ *
+ * @handle: See WUSB1.0[8.5.3.1]
+ */
+static int __hwahc_op_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_REMOVE_MMC_IE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, handle << 8 | iface_no,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+/*
+ * Update device information for a given fake port
+ *
+ * @port_idx: Fake port to which device is connected (wusbhc index, not
+ * USB port number).
+ */
+static int __hwahc_op_dev_info_set(struct wusbhc *wusbhc,
+ struct wusb_dev *wusb_dev)
+{
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ struct hwa_dev_info *dev_info;
+ int ret;
+
+ /* fill out the Device Info buffer and send it */
+ dev_info = kzalloc(sizeof(struct hwa_dev_info), GFP_KERNEL);
+ if (!dev_info)
+ return -ENOMEM;
+ uwb_mas_bm_copy_le(dev_info->bmDeviceAvailability,
+ &wusb_dev->availability);
+ dev_info->bDeviceAddress = wusb_dev->addr;
+
+ /*
+ * If the descriptors haven't been read yet, use a default PHY
+ * rate of 53.3 Mbit/s only. The correct value will be used
+ * when this will be called again as part of the
+ * authentication process (which occurs after the descriptors
+ * have been read).
+ */
+ if (wusb_dev->wusb_cap_descr)
+ dev_info->wPHYRates = wusb_dev->wusb_cap_descr->wPHYRates;
+ else
+ dev_info->wPHYRates = cpu_to_le16(USB_WIRELESS_PHY_53);
+
+ ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ WUSB_REQ_SET_DEV_INFO,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, wusb_dev->port_idx << 8 | iface_no,
+ dev_info, sizeof(struct hwa_dev_info),
+ USB_CTRL_SET_TIMEOUT);
+ kfree(dev_info);
+ return ret;
+}
+
+/*
+ * Set host's idea of which encryption (and key) method to use when
+ * talking to a device on a given port.
+ *
+ * If key is NULL, it means disable encryption for that "virtual port"
+ * (used when we disconnect).
+ */
+static int __hwahc_dev_set_key(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *key, size_t key_size,
+ u8 key_idx)
+{
+ int result = -ENOMEM;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_key_descriptor *keyd;
+ size_t keyd_len;
+
+ keyd_len = sizeof(*keyd) + key_size;
+ keyd = kzalloc(keyd_len, GFP_KERNEL);
+ if (keyd == NULL)
+ return -ENOMEM;
+
+ keyd->bLength = keyd_len;
+ keyd->bDescriptorType = USB_DT_KEY;
+ keyd->tTKID[0] = (tkid >> 0) & 0xff;
+ keyd->tTKID[1] = (tkid >> 8) & 0xff;
+ keyd->tTKID[2] = (tkid >> 16) & 0xff;
+ memcpy(keyd->bKeyData, key, key_size);
+
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_SET_DESCRIPTOR,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ USB_DT_KEY << 8 | key_idx,
+ port_idx << 8 | iface_no,
+ keyd, keyd_len, USB_CTRL_SET_TIMEOUT);
+
+ kzfree(keyd); /* clear keys etc. */
+ return result;
+}
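+
+/*
+ * For illustration only: the TKID lands little-endian in tTKID[], so e.g.
+ * tkid = 0x019876 is stored as { 0x76, 0x98, 0x01 } -- the same byte order
+ * as the tTKID test vectors in crypto.c earlier in this patch.
+ */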
+
+/*
+ * Set host's idea of which encryption (and key) method to use when
+ * talking to a device on a given port.
+ *
+ * If key is NULL, it means disable encryption for that "virtual port"
+ * (used when we disconnect).
+ */
+static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *key, size_t key_size)
+{
+ int result = -ENOMEM;
+ struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ struct wahc *wa = &hwahc->wa;
+ u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+ u8 encryption_value;
+
+ /* Tell the host which key to use to talk to the device */
+ if (key) {
+ u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
+ WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+ result = __hwahc_dev_set_key(wusbhc, port_idx, tkid,
+ key, key_size, key_idx);
+ if (result < 0)
+ goto error_set_key;
+ encryption_value = wusbhc->ccm1_etd->bEncryptionValue;
+ } else {
+ /* FIXME: this should come from wusbhc->etd[UNSECURE].value */
+ encryption_value = 0;
+ }
+
+ /* Set the encryption type for communicating with the device */
+ result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_SET_ENCRYPTION,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ encryption_value, port_idx << 8 | iface_no,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (result < 0)
+ dev_err(wusbhc->dev, "Can't set host's WUSB encryption for "
+ "port index %u to %s (value %d): %d\n", port_idx,
+ wusb_et_name(wusbhc->ccm1_etd->bEncryptionType),
+ wusbhc->ccm1_etd->bEncryptionValue, result);
+error_set_key:
+ return result;
+}
+
+/*
+ * Set host's GTK key
+ */
+static int __hwahc_op_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+ const void *key, size_t key_size)
+{
+ u8 key_idx = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
+ WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+ return __hwahc_dev_set_key(wusbhc, 0, tkid, key, key_size, key_idx);
+}
+
+/*
+ * Get the Wire Adapter class-specific descriptor
+ *
+ * NOTE: this descriptor comes with the big bundled configuration
+ * descriptor that includes the interfaces' and endpoints' descriptors, so
+ * we just look for it in the cached copy kept by the USB stack.
+ *
+ * NOTE2: We convert LE fields to CPU order.
+ */
+static int wa_fill_descr(struct wahc *wa)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+ char *itr;
+ struct usb_device *usb_dev = wa->usb_dev;
+ struct usb_descriptor_header *hdr;
+ struct usb_wa_descriptor *wa_descr;
+ size_t itr_size, actconfig_idx;
+
+ actconfig_idx = (usb_dev->actconfig - usb_dev->config) /
+ sizeof(usb_dev->config[0]);
+ itr = usb_dev->rawdescriptors[actconfig_idx];
+ itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+ while (itr_size >= sizeof(*hdr)) {
+ hdr = (struct usb_descriptor_header *) itr;
+ dev_dbg(dev, "Extra device descriptor: "
+ "type %02x/%u bytes @ %zu (%zu left)\n",
+ hdr->bDescriptorType, hdr->bLength,
+ (itr - usb_dev->rawdescriptors[actconfig_idx]),
+ itr_size);
+ if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
+ goto found;
+ itr += hdr->bLength;
+ itr_size -= hdr->bLength;
+ }
+ dev_err(dev, "cannot find Wire Adapter Class descriptor\n");
+ return -ENODEV;
+
+found:
+ result = -EINVAL;
+ if (hdr->bLength > itr_size) { /* is it available? */
+ dev_err(dev, "incomplete Wire Adapter Class descriptor "
+ "(%zu bytes left, %u needed)\n",
+ itr_size, hdr->bLength);
+ goto error;
+ }
+ if (hdr->bLength < sizeof(*wa->wa_descr)) {
+ dev_err(dev, "short Wire Adapter Class descriptor\n");
+ goto error;
+ }
+ wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
+ if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
+		dev_warn(dev, "Wire Adapter v%d.%d newer than grokked v1.0\n",
+ (le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8,
+ le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
+ result = 0;
+error:
+ return result;
+}
+
+static const struct hc_driver hwahc_hc_driver = {
+ .description = "hwa-hcd",
+ .product_desc = "Wireless USB HWA host controller",
+ .hcd_priv_size = sizeof(struct hwahc) - sizeof(struct usb_hcd),
+ .irq = NULL, /* FIXME */
+ .flags = HCD_USB25,
+ .reset = hwahc_op_reset,
+ .start = hwahc_op_start,
+ .stop = hwahc_op_stop,
+ .get_frame_number = hwahc_op_get_frame_number,
+ .urb_enqueue = hwahc_op_urb_enqueue,
+ .urb_dequeue = hwahc_op_urb_dequeue,
+ .endpoint_disable = hwahc_op_endpoint_disable,
+
+ .hub_status_data = wusbhc_rh_status_data,
+ .hub_control = wusbhc_rh_control,
+ .start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int hwahc_security_create(struct hwahc *hwahc)
+{
+ int result;
+ struct wusbhc *wusbhc = &hwahc->wusbhc;
+ struct usb_device *usb_dev = hwahc->wa.usb_dev;
+ struct device *dev = &usb_dev->dev;
+ struct usb_security_descriptor *secd;
+ struct usb_encryption_descriptor *etd;
+ void *itr, *top;
+ size_t itr_size, needed, bytes;
+ u8 index;
+ char buf[64];
+
+ /* Find the host's security descriptors in the config descr bundle */
+ index = (usb_dev->actconfig - usb_dev->config) /
+ sizeof(usb_dev->config[0]);
+ itr = usb_dev->rawdescriptors[index];
+ itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
+ top = itr + itr_size;
+ result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
+ le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
+ USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
+ if (result == -1) {
+ dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
+ return 0;
+ }
+ needed = sizeof(*secd);
+ if (top - (void *)secd < needed) {
+ dev_err(dev, "BUG? Not enough data to process security "
+ "descriptor header (%zu bytes left vs %zu needed)\n",
+ top - (void *) secd, needed);
+ return 0;
+ }
+ needed = le16_to_cpu(secd->wTotalLength);
+ if (top - (void *)secd < needed) {
+ dev_err(dev, "BUG? Not enough data to process security "
+ "descriptors (%zu bytes left vs %zu needed)\n",
+ top - (void *) secd, needed);
+ return 0;
+ }
+ /* Walk over the sec descriptors and store CCM1's on wusbhc */
+ itr = (void *) secd + sizeof(*secd);
+ top = (void *) secd + le16_to_cpu(secd->wTotalLength);
+ index = 0;
+ bytes = 0;
+ while (itr < top) {
+ etd = itr;
+ if (top - itr < sizeof(*etd)) {
+ dev_err(dev, "BUG: bad host security descriptor; "
+ "not enough data (%zu vs %zu left)\n",
+ top - itr, sizeof(*etd));
+ break;
+ }
+ if (etd->bLength < sizeof(*etd)) {
+ dev_err(dev, "BUG: bad host encryption descriptor; "
+ "descriptor is too short "
+ "(%zu vs %zu needed)\n",
+ (size_t)etd->bLength, sizeof(*etd));
+ break;
+ }
+ itr += etd->bLength;
+ bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
+ "%s (0x%02x) ",
+ wusb_et_name(etd->bEncryptionType),
+ etd->bEncryptionValue);
+ wusbhc->ccm1_etd = etd;
+ }
+ dev_info(dev, "supported encryption types: %s\n", buf);
+ if (wusbhc->ccm1_etd == NULL) {
+ dev_err(dev, "E: host doesn't support CCM-1 crypto\n");
+ return 0;
+ }
+ /* Pretty print what we support */
+ return 0;
+}
+
+static void hwahc_security_release(struct hwahc *hwahc)
+{
+ /* nothing to do here so far... */
+}
+
+static int hwahc_create(struct hwahc *hwahc, struct usb_interface *iface,
+ kernel_ulong_t quirks)
+{
+ int result;
+ struct device *dev = &iface->dev;
+ struct wusbhc *wusbhc = &hwahc->wusbhc;
+ struct wahc *wa = &hwahc->wa;
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+
+ wa->usb_dev = usb_get_dev(usb_dev); /* bind the USB device */
+ wa->usb_iface = usb_get_intf(iface);
+ wusbhc->dev = dev;
+ /* defer getting the uwb_rc handle until it is needed since it
+ * may not have been registered by the hwa_rc driver yet. */
+ wusbhc->uwb_rc = NULL;
+ result = wa_fill_descr(wa); /* Get the device descriptor */
+ if (result < 0)
+ goto error_fill_descriptor;
+ if (wa->wa_descr->bNumPorts > USB_MAXCHILDREN) {
+ dev_err(dev, "FIXME: USB_MAXCHILDREN too low for WUSB "
+ "adapter (%u ports)\n", wa->wa_descr->bNumPorts);
+ wusbhc->ports_max = USB_MAXCHILDREN;
+ } else {
+ wusbhc->ports_max = wa->wa_descr->bNumPorts;
+ }
+ wusbhc->mmcies_max = wa->wa_descr->bNumMMCIEs;
+ wusbhc->start = __hwahc_op_wusbhc_start;
+ wusbhc->stop = __hwahc_op_wusbhc_stop;
+ wusbhc->mmcie_add = __hwahc_op_mmcie_add;
+ wusbhc->mmcie_rm = __hwahc_op_mmcie_rm;
+ wusbhc->dev_info_set = __hwahc_op_dev_info_set;
+ wusbhc->bwa_set = __hwahc_op_bwa_set;
+ wusbhc->set_num_dnts = __hwahc_op_set_num_dnts;
+ wusbhc->set_ptk = __hwahc_op_set_ptk;
+ wusbhc->set_gtk = __hwahc_op_set_gtk;
+ result = hwahc_security_create(hwahc);
+ if (result < 0) {
+ dev_err(dev, "Can't initialize security: %d\n", result);
+ goto error_security_create;
+ }
+ wa->wusb = wusbhc; /* FIXME: ugly, need to fix */
+ result = wusbhc_create(&hwahc->wusbhc);
+ if (result < 0) {
+ dev_err(dev, "Can't create WUSB HC structures: %d\n", result);
+ goto error_wusbhc_create;
+ }
+ result = wa_create(&hwahc->wa, iface, quirks);
+ if (result < 0)
+ goto error_wa_create;
+ return 0;
+
+error_wa_create:
+ wusbhc_destroy(&hwahc->wusbhc);
+error_wusbhc_create:
+ /* WA Descr fill allocs no resources */
+error_security_create:
+error_fill_descriptor:
+ usb_put_intf(iface);
+ usb_put_dev(usb_dev);
+ return result;
+}
+
+static void hwahc_destroy(struct hwahc *hwahc)
+{
+ struct wusbhc *wusbhc = &hwahc->wusbhc;
+
+ mutex_lock(&wusbhc->mutex);
+ __wa_destroy(&hwahc->wa);
+ wusbhc_destroy(&hwahc->wusbhc);
+ hwahc_security_release(hwahc);
+ hwahc->wusbhc.dev = NULL;
+ uwb_rc_put(wusbhc->uwb_rc);
+ usb_put_intf(hwahc->wa.usb_iface);
+ usb_put_dev(hwahc->wa.usb_dev);
+ mutex_unlock(&wusbhc->mutex);
+}
+
+static void hwahc_init(struct hwahc *hwahc)
+{
+ wa_init(&hwahc->wa);
+}
+
+static int hwahc_probe(struct usb_interface *usb_iface,
+ const struct usb_device_id *id)
+{
+ int result;
+ struct usb_hcd *usb_hcd;
+ struct wusbhc *wusbhc;
+ struct hwahc *hwahc;
+ struct device *dev = &usb_iface->dev;
+
+ result = -ENOMEM;
+ usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
+ if (usb_hcd == NULL) {
+ dev_err(dev, "unable to allocate instance\n");
+ goto error_alloc;
+ }
+ usb_hcd->wireless = 1;
+ usb_hcd->self.sg_tablesize = ~0;
+ wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+ hwahc_init(hwahc);
+ result = hwahc_create(hwahc, usb_iface, id->driver_info);
+ if (result < 0) {
+ dev_err(dev, "Cannot initialize internals: %d\n", result);
+ goto error_hwahc_create;
+ }
+ result = usb_add_hcd(usb_hcd, 0, 0);
+ if (result < 0) {
+ dev_err(dev, "Cannot add HCD: %d\n", result);
+ goto error_add_hcd;
+ }
+ device_wakeup_enable(usb_hcd->self.controller);
+ result = wusbhc_b_create(&hwahc->wusbhc);
+ if (result < 0) {
+ dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
+ goto error_wusbhc_b_create;
+ }
+ return 0;
+
+error_wusbhc_b_create:
+ usb_remove_hcd(usb_hcd);
+error_add_hcd:
+ hwahc_destroy(hwahc);
+error_hwahc_create:
+ usb_put_hcd(usb_hcd);
+error_alloc:
+ return result;
+}
+
+static void hwahc_disconnect(struct usb_interface *usb_iface)
+{
+ struct usb_hcd *usb_hcd;
+ struct wusbhc *wusbhc;
+ struct hwahc *hwahc;
+
+ usb_hcd = usb_get_intfdata(usb_iface);
+ wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+
+ wusbhc_b_destroy(&hwahc->wusbhc);
+ usb_remove_hcd(usb_hcd);
+ hwahc_destroy(hwahc);
+ usb_put_hcd(usb_hcd);
+}
+
+static const struct usb_device_id hwahc_id_table[] = {
+ /* Alereon 5310 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5310, 0xe0, 0x02, 0x01),
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
+ /* Alereon 5611 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x13dc, 0x5611, 0xe0, 0x02, 0x01),
+ .driver_info = WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC |
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS },
+ /* FIXME: use class labels for this */
+ { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, hwahc_id_table);
+
+static struct usb_driver hwahc_driver = {
+ .name = "hwa-hc",
+ .probe = hwahc_probe,
+ .disconnect = hwahc_disconnect,
+ .id_table = hwahc_id_table,
+};
+
+module_usb_driver(hwahc_driver);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Host Wire Adapter USB Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/host/whci/Makefile b/drivers/staging/wusbcore/host/whci/Makefile
new file mode 100644
index 000000000000..859d20079df6
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_USB_WHCI_HCD) += whci-hcd.o
+
+whci-hcd-y := \
+ asl.o \
+ debug.o \
+ hcd.o \
+ hw.o \
+ init.o \
+ int.o \
+ pzl.o \
+ qset.o \
+ wusb.o
diff --git a/drivers/staging/wusbcore/host/whci/asl.c b/drivers/staging/wusbcore/host/whci/asl.c
new file mode 100644
index 000000000000..a2b9a50cfb80
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/asl.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) asynchronous schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb.h>
+
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
+ struct whc_qset **next, struct whc_qset **prev)
+{
+ struct list_head *n, *p;
+
+ BUG_ON(list_empty(&whc->async_list));
+
+ n = qset->list_node.next;
+ if (n == &whc->async_list)
+ n = n->next;
+ p = qset->list_node.prev;
+ if (p == &whc->async_list)
+ p = p->prev;
+
+ *next = container_of(n, struct whc_qset, list_node);
+ *prev = container_of(p, struct whc_qset, list_node);
+
+}
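+
+/*
+ * Worked example (illustrative): the ASL is circular but the list head
+ * is not a qset, so it is skipped.  With head <-> A <-> B, the
+ * neighbours of A are next == B and prev == B; with a single qset A,
+ * both next and prev are A itself.
+ */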
+
+static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
+{
+ list_move(&qset->list_node, &whc->async_list);
+ qset->in_sw_list = true;
+}
+
+static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_qset *next, *prev;
+
+ qset_clear(whc, qset);
+
+ /* Link into ASL. */
+ qset_get_next_prev(whc, qset, &next, &prev);
+ whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
+ whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
+ qset->in_hw_list = true;
+}
+
+static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_qset *prev, *next;
+
+ qset_get_next_prev(whc, qset, &next, &prev);
+
+ list_move(&qset->list_node, &whc->async_removed_list);
+ qset->in_sw_list = false;
+
+ /*
+ * No more qsets in the ASL? The caller must stop the ASL as
+ * it's no longer valid.
+ */
+ if (list_empty(&whc->async_list))
+ return;
+
+ /* Remove from ASL. */
+ whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
+ qset->in_hw_list = false;
+}
+
+/**
+ * process_qset - process any recently inactivated or halted qTDs in a
+ * qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns the schedule update flags required (e.g., WHC_UPDATE_REMOVED
+ * if a qset was removed); scan_async_work() translates these into the
+ * WUSBCMD bits (such as WUSBCMD_ASYNC_QSET_RM) for the ASL sync command.
+ */
+static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
+{
+ enum whc_update update = 0;
+ uint32_t status = 0;
+
+ while (qset->ntds) {
+ struct whc_qtd *td;
+
+ td = &qset->qtd[qset->td_start];
+ status = le32_to_cpu(td->status);
+
+ /*
+ * Nothing to do with a still active qTD.
+ */
+ if (status & QTD_STS_ACTIVE)
+ break;
+
+ if (status & QTD_STS_HALTED) {
+ /* Ug, an error. */
+ process_halted_qtd(whc, qset, td);
+ /* A halted qTD always triggers an update
+ because the qset was either removed or
+ reactivated. */
+ update |= WHC_UPDATE_UPDATED;
+ goto done;
+ }
+
+ /* Mmm, a completed qTD. */
+ process_inactive_qtd(whc, qset, td);
+ }
+
+ if (!qset->remove)
+ update |= qset_add_qtds(whc, qset);
+
+done:
+ /*
+	 * Remove this qset from the ASL if requested, but only if it has
+ * no qTDs.
+ */
+ if (qset->remove && qset->ntds == 0) {
+ asl_qset_remove(whc, qset);
+ update |= WHC_UPDATE_REMOVED;
+ }
+ return update;
+}
+
+void asl_start(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+
+ le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
+
+ whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
+ 1000, "start ASL");
+}
+
+void asl_stop(struct whc *whc)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_ASYNC_SCHED, 0,
+ 1000, "stop ASL");
+}
+
+/**
+ * asl_update - request an ASL update and wait for the hardware to be synced
+ * @whc: the WHCI HC
+ * @wusbcmd: WUSBCMD value to start the update.
+ *
+ * If the WUSB HC is inactive (i.e., the ASL is stopped) then the
+ * update must be skipped as the hardware may not respond to update
+ * requests.
+ */
+void asl_update(struct whc *whc, uint32_t wusbcmd)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ long t;
+
+ mutex_lock(&wusbhc->mutex);
+ if (wusbhc->active) {
+ whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+ t = wait_event_timeout(
+ whc->async_list_wq,
+ (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
+ msecs_to_jiffies(1000));
+ if (t == 0)
+ whc_hw_error(whc, "ASL update timeout");
+ }
+ mutex_unlock(&wusbhc->mutex);
+}
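+
+/*
+ * Note (illustrative): the handshake relies on the hardware clearing
+ * WUSBCMD_ASYNC_UPDATED once it has picked up the new ASL; the
+ * WUSBSTS_ASYNC_SCHED_SYNCED interrupt (see int.c) wakes async_list_wq
+ * so the wait above can re-check the bit.
+ */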
+
+/**
+ * scan_async_work - scan the ASL for qsets to process.
+ *
+ * Process each qset in the ASL in turn and then signal the WHC that
+ * the ASL has been updated.
+ *
+ * Then start, stop or update the asynchronous schedule as required.
+ */
+void scan_async_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, async_work);
+ struct whc_qset *qset, *t;
+ enum whc_update update = 0;
+
+ spin_lock_irq(&whc->lock);
+
+ /*
+	 * Traverse the software list backwards so new qsets can be
+ * safely inserted into the ASL without making it non-circular.
+ */
+ list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
+ if (!qset->in_hw_list) {
+ asl_qset_insert(whc, qset);
+ update |= WHC_UPDATE_ADDED;
+ }
+
+ update |= process_qset(whc, qset);
+ }
+
+ spin_unlock_irq(&whc->lock);
+
+ if (update) {
+ uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
+ if (update & WHC_UPDATE_REMOVED)
+ wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
+ asl_update(whc, wusbcmd);
+ }
+
+ /*
+ * Now that the ASL is updated, complete the removal of any
+ * removed qsets.
+ *
+ * If the qset was to be reset, do so and reinsert it into the
+ * ASL if it has pending transfers.
+ */
+ spin_lock_irq(&whc->lock);
+
+ list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
+ qset_remove_complete(whc, qset);
+ if (qset->reset) {
+ qset_reset(whc, qset);
+ if (!list_empty(&qset->stds)) {
+ asl_qset_insert_begin(whc, qset);
+ queue_work(whc->workqueue, &whc->async_work);
+ }
+ }
+ }
+
+ spin_unlock_irq(&whc->lock);
+}
+
+/**
+ * asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the ASL.
+ */
+int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+ if (err < 0) {
+ spin_unlock_irqrestore(&whc->lock, flags);
+ return err;
+ }
+
+ qset = get_qset(whc, urb, GFP_ATOMIC);
+ if (qset == NULL)
+ err = -ENOMEM;
+ else
+ err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+ if (!err) {
+ if (!qset->in_sw_list && !qset->remove)
+ asl_qset_insert_begin(whc, qset);
+ } else
+ usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ if (!err)
+ queue_work(whc->workqueue, &whc->async_work);
+
+ return err;
+}
+
+/**
+ * asl_urb_dequeue - remove an URB (qset) from the async list.
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the software
+ * queue, otherwise the qset must be removed from the ASL so the qTDs
+ * can be removed.
+ */
+int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+ struct whc_urb *wurb = urb->hcpriv;
+ struct whc_qset *qset = wurb->qset;
+ struct whc_std *std, *t;
+ bool has_qtd = false;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb) {
+ if (std->qtd)
+ has_qtd = true;
+ qset_free_std(whc, std);
+ } else
+ std->qtd = NULL; /* so this std is re-added when the qset is */
+ }
+
+ if (has_qtd) {
+ asl_qset_remove(whc, qset);
+ wurb->status = status;
+ wurb->is_async = true;
+ queue_work(whc->workqueue, &wurb->dequeue_work);
+ } else
+ qset_remove_urb(whc, qset, urb, status);
+out:
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ return ret;
+}
+
+/**
+ * asl_qset_delete - delete a qset from the ASL
+ */
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ qset->remove = 1;
+ queue_work(whc->workqueue, &whc->async_work);
+ qset_delete(whc, qset);
+}
+
+/**
+ * asl_init - initialize the asynchronous schedule list
+ *
+ * A dummy qset with no qTDs is added to the ASL to simplify removing
+ * qsets (no need to stop the ASL when the last qset is removed).
+ */
+int asl_init(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ qset = qset_alloc(whc, GFP_KERNEL);
+ if (qset == NULL)
+ return -ENOMEM;
+
+ asl_qset_insert_begin(whc, qset);
+ asl_qset_insert(whc, qset);
+
+ return 0;
+}
+
+/**
+ * asl_clean_up - free ASL resources
+ *
+ * The ASL is stopped and empty except for the dummy qset.
+ */
+void asl_clean_up(struct whc *whc)
+{
+ struct whc_qset *qset;
+
+ if (!list_empty(&whc->async_list)) {
+ qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
+ list_del(&qset->list_node);
+ qset_free(whc, qset);
+ }
+}
diff --git a/drivers/staging/wusbcore/host/whci/debug.c b/drivers/staging/wusbcore/host/whci/debug.c
new file mode 100644
index 000000000000..443da6719147
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/debug.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) debug.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/export.h>
+
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+struct whc_dbg {
+ struct dentry *di_f;
+ struct dentry *asl_f;
+ struct dentry *pzl_f;
+};
+
+static void qset_print(struct seq_file *s, struct whc_qset *qset)
+{
+ static const char *qh_type[] = {
+ "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
+ struct whc_std *std;
+ struct urb *urb = NULL;
+ int i;
+
+ seq_printf(s, "qset %08x", (u32)qset->qset_dma);
+ if (&qset->list_node == qset->whc->async_list.prev) {
+ seq_printf(s, " (dummy)\n");
+ } else {
+ seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
+ qset->qh.info1 & 0x0f,
+ (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
+ qh_type[(qset->qh.info1 >> 5) & 0x7],
+ (qset->qh.info1 >> 16) & 0xffff);
+ }
+ seq_printf(s, " -> %08x\n", (u32)qset->qh.link);
+ seq_printf(s, " info: %08x %08x %08x\n",
+ qset->qh.info1, qset->qh.info2, qset->qh.info3);
+ seq_printf(s, " sts: %04x errs: %d curwin: %08x\n",
+ qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
+ seq_printf(s, " TD: sts: %08x opts: %08x\n",
+ qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
+
+ for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
+ seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
+ i == qset->td_start ? 'S' : ' ',
+ i == qset->td_end ? 'E' : ' ',
+ i, qset->qtd[i].status, qset->qtd[i].options,
+ (u32)qset->qtd[i].page_list_ptr);
+ }
+ seq_printf(s, " ntds: %d\n", qset->ntds);
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (urb != std->urb) {
+ urb = std->urb;
+ seq_printf(s, " urb %p transferred: %d bytes\n", urb,
+ urb->actual_length);
+ }
+ if (std->qtd)
+ seq_printf(s, " sTD[%td]: %zu bytes @ %08x\n",
+ std->qtd - &qset->qtd[0],
+ std->len, std->num_pointers ?
+ (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+ else
+ seq_printf(s, " sTD[-]: %zd bytes @ %08x\n",
+ std->len, std->num_pointers ?
+ (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+ }
+}
+
+static int di_show(struct seq_file *s, void *p)
+{
+ struct whc *whc = s->private;
+ int d;
+
+ for (d = 0; d < whc->n_devices; d++) {
+ struct di_buf_entry *di = &whc->di_buf[d];
+
+ seq_printf(s, "DI[%d]\n", d);
+ seq_printf(s, " availability: %*pb\n",
+ UWB_NUM_MAS, (unsigned long *)di->availability_info);
+ seq_printf(s, " %c%c key idx: %d dev addr: %d\n",
+ (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
+ (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
+ (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
+ (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(di);
+
+static int asl_show(struct seq_file *s, void *p)
+{
+ struct whc *whc = s->private;
+ struct whc_qset *qset;
+
+ list_for_each_entry(qset, &whc->async_list, list_node) {
+ qset_print(s, qset);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(asl);
+
+static int pzl_show(struct seq_file *s, void *p)
+{
+ struct whc *whc = s->private;
+ struct whc_qset *qset;
+ int period;
+
+ for (period = 0; period < 5; period++) {
+ seq_printf(s, "Period %d\n", period);
+ list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
+ qset_print(s, qset);
+ }
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pzl);
+
+void whc_dbg_init(struct whc *whc)
+{
+ if (whc->wusbhc.pal.debugfs_dir == NULL)
+ return;
+
+ whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL);
+ if (whc->dbg == NULL)
+ return;
+
+ whc->dbg->di_f = debugfs_create_file("di", 0444,
+ whc->wusbhc.pal.debugfs_dir, whc,
+ &di_fops);
+ whc->dbg->asl_f = debugfs_create_file("asl", 0444,
+ whc->wusbhc.pal.debugfs_dir, whc,
+ &asl_fops);
+ whc->dbg->pzl_f = debugfs_create_file("pzl", 0444,
+ whc->wusbhc.pal.debugfs_dir, whc,
+ &pzl_fops);
+}
+
+void whc_dbg_clean_up(struct whc *whc)
+{
+ if (whc->dbg) {
+ debugfs_remove(whc->dbg->pzl_f);
+ debugfs_remove(whc->dbg->asl_f);
+ debugfs_remove(whc->dbg->di_f);
+ kfree(whc->dbg);
+ }
+}
diff --git a/drivers/staging/wusbcore/host/whci/hcd.c b/drivers/staging/wusbcore/host/whci/hcd.c
new file mode 100644
index 000000000000..bee1ff2d35be
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/hcd.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) driver.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * One time initialization.
+ *
+ * Nothing to do here.
+ */
+static int whc_reset(struct usb_hcd *usb_hcd)
+{
+ return 0;
+}
+
+/*
+ * Start the wireless host controller.
+ *
+ * Start device notification.
+ *
+ * Put hc into run state, set DNTS parameters.
+ */
+static int whc_start(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u8 bcid;
+ int ret;
+
+ mutex_lock(&wusbhc->mutex);
+
+ le_writel(WUSBINTR_GEN_CMD_DONE
+ | WUSBINTR_HOST_ERR
+ | WUSBINTR_ASYNC_SCHED_SYNCED
+ | WUSBINTR_DNTS_INT
+ | WUSBINTR_ERR_INT
+ | WUSBINTR_INT,
+ whc->base + WUSBINTR);
+
+ /* set cluster ID */
+ bcid = wusb_cluster_id_get();
+ ret = whc_set_cluster_id(whc, bcid);
+ if (ret < 0)
+ goto out;
+ wusbhc->cluster_id = bcid;
+
+ /* start HC */
+ whc_write_wusbcmd(whc, WUSBCMD_RUN, WUSBCMD_RUN);
+
+ usb_hcd->uses_new_polling = 1;
+ set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
+ usb_hcd->state = HC_STATE_RUNNING;
+
+out:
+ mutex_unlock(&wusbhc->mutex);
+ return ret;
+}
+
+
+/*
+ * Stop the wireless host controller.
+ *
+ * Stop device notification.
+ *
+ * Wait for pending transfer to stop? Put hc into stop state?
+ */
+static void whc_stop(struct usb_hcd *usb_hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ mutex_lock(&wusbhc->mutex);
+
+ /* stop HC */
+ le_writel(0, whc->base + WUSBINTR);
+ whc_write_wusbcmd(whc, WUSBCMD_RUN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_HCHALTED, WUSBSTS_HCHALTED,
+ 100, "HC to halt");
+
+ wusb_cluster_id_put(wusbhc->cluster_id);
+
+ mutex_unlock(&wusbhc->mutex);
+}
+
+static int whc_get_frame_number(struct usb_hcd *usb_hcd)
+{
+ /* Frame numbers are not applicable to WUSB. */
+ return -ENOSYS;
+}
+
+
+/*
+ * Queue an URB to the ASL or PZL
+ */
+static int whc_urb_enqueue(struct usb_hcd *usb_hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ ret = pzl_urb_enqueue(whc, urb, mem_flags);
+ break;
+ case PIPE_ISOCHRONOUS:
+ dev_err(&whc->umc->dev, "isochronous transfers unsupported\n");
+ ret = -ENOTSUPP;
+ break;
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ default:
+ ret = asl_urb_enqueue(whc, urb, mem_flags);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Remove a queued URB from the ASL or PZL.
+ */
+static int whc_urb_dequeue(struct usb_hcd *usb_hcd, struct urb *urb, int status)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_INTERRUPT:
+ ret = pzl_urb_dequeue(whc, urb, status);
+ break;
+ case PIPE_ISOCHRONOUS:
+ ret = -ENOTSUPP;
+ break;
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ default:
+ ret = asl_urb_dequeue(whc, urb, status);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Wait for all URBs to the endpoint to be completed, then delete the
+ * qset.
+ */
+static void whc_endpoint_disable(struct usb_hcd *usb_hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ struct whc_qset *qset;
+
+ qset = ep->hcpriv;
+ if (qset) {
+ ep->hcpriv = NULL;
+ if (usb_endpoint_xfer_bulk(&ep->desc)
+ || usb_endpoint_xfer_control(&ep->desc))
+ asl_qset_delete(whc, qset);
+ else
+ pzl_qset_delete(whc, qset);
+ }
+}
+
+static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ struct whc_qset *qset;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ qset = ep->hcpriv;
+ if (qset) {
+ qset->remove = 1;
+ qset->reset = 1;
+
+ if (usb_endpoint_xfer_bulk(&ep->desc)
+ || usb_endpoint_xfer_control(&ep->desc))
+ queue_work(whc->workqueue, &whc->async_work);
+ else
+ queue_work(whc->workqueue, &whc->periodic_work);
+ }
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+
+static const struct hc_driver whc_hc_driver = {
+ .description = "whci-hcd",
+ .product_desc = "Wireless host controller",
+ .hcd_priv_size = sizeof(struct whc) - sizeof(struct usb_hcd),
+ .irq = whc_int_handler,
+ .flags = HCD_USB2,
+
+ .reset = whc_reset,
+ .start = whc_start,
+ .stop = whc_stop,
+ .get_frame_number = whc_get_frame_number,
+ .urb_enqueue = whc_urb_enqueue,
+ .urb_dequeue = whc_urb_dequeue,
+ .endpoint_disable = whc_endpoint_disable,
+ .endpoint_reset = whc_endpoint_reset,
+
+ .hub_status_data = wusbhc_rh_status_data,
+ .hub_control = wusbhc_rh_control,
+ .start_port_reset = wusbhc_rh_start_port_reset,
+};
+
+static int whc_probe(struct umc_dev *umc)
+{
+ int ret;
+ struct usb_hcd *usb_hcd;
+ struct wusbhc *wusbhc;
+ struct whc *whc;
+ struct device *dev = &umc->dev;
+
+ usb_hcd = usb_create_hcd(&whc_hc_driver, dev, "whci");
+ if (usb_hcd == NULL) {
+ dev_err(dev, "unable to create hcd\n");
+ return -ENOMEM;
+ }
+
+ usb_hcd->wireless = 1;
+ usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
+
+ wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ whc = wusbhc_to_whc(wusbhc);
+ whc->umc = umc;
+
+ ret = whc_init(whc);
+ if (ret)
+ goto error_whc_init;
+
+ wusbhc->dev = dev;
+ wusbhc->uwb_rc = uwb_rc_get_by_grandpa(umc->dev.parent);
+ if (!wusbhc->uwb_rc) {
+ ret = -ENODEV;
+ dev_err(dev, "cannot get radio controller\n");
+ goto error_uwb_rc;
+ }
+
+ if (whc->n_devices > USB_MAXCHILDREN) {
+ dev_warn(dev, "USB_MAXCHILDREN too low for WUSB adapter (%u ports)\n",
+ whc->n_devices);
+ wusbhc->ports_max = USB_MAXCHILDREN;
+ } else
+ wusbhc->ports_max = whc->n_devices;
+ wusbhc->mmcies_max = whc->n_mmc_ies;
+ wusbhc->start = whc_wusbhc_start;
+ wusbhc->stop = whc_wusbhc_stop;
+ wusbhc->mmcie_add = whc_mmcie_add;
+ wusbhc->mmcie_rm = whc_mmcie_rm;
+ wusbhc->dev_info_set = whc_dev_info_set;
+ wusbhc->bwa_set = whc_bwa_set;
+ wusbhc->set_num_dnts = whc_set_num_dnts;
+ wusbhc->set_ptk = whc_set_ptk;
+ wusbhc->set_gtk = whc_set_gtk;
+
+ ret = wusbhc_create(wusbhc);
+ if (ret)
+ goto error_wusbhc_create;
+
+ ret = usb_add_hcd(usb_hcd, whc->umc->irq, IRQF_SHARED);
+ if (ret) {
+ dev_err(dev, "cannot add HCD: %d\n", ret);
+ goto error_usb_add_hcd;
+ }
+ device_wakeup_enable(usb_hcd->self.controller);
+
+ ret = wusbhc_b_create(wusbhc);
+ if (ret) {
+ dev_err(dev, "WUSBHC phase B setup failed: %d\n", ret);
+ goto error_wusbhc_b_create;
+ }
+
+ whc_dbg_init(whc);
+
+ return 0;
+
+error_wusbhc_b_create:
+ usb_remove_hcd(usb_hcd);
+error_usb_add_hcd:
+ wusbhc_destroy(wusbhc);
+error_wusbhc_create:
+ uwb_rc_put(wusbhc->uwb_rc);
+error_uwb_rc:
+ whc_clean_up(whc);
+error_whc_init:
+ usb_put_hcd(usb_hcd);
+ return ret;
+}
+
+
+static void whc_remove(struct umc_dev *umc)
+{
+ struct usb_hcd *usb_hcd = dev_get_drvdata(&umc->dev);
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ if (usb_hcd) {
+ whc_dbg_clean_up(whc);
+ wusbhc_b_destroy(wusbhc);
+ usb_remove_hcd(usb_hcd);
+ wusbhc_destroy(wusbhc);
+ uwb_rc_put(wusbhc->uwb_rc);
+ whc_clean_up(whc);
+ usb_put_hcd(usb_hcd);
+ }
+}
+
+static struct umc_driver whci_hc_driver = {
+ .name = "whci-hcd",
+ .cap_id = UMC_CAP_ID_WHCI_WUSB_HC,
+ .probe = whc_probe,
+ .remove = whc_remove,
+};
+
+static int __init whci_hc_driver_init(void)
+{
+ return umc_driver_register(&whci_hc_driver);
+}
+module_init(whci_hc_driver_init);
+
+static void __exit whci_hc_driver_exit(void)
+{
+ umc_driver_unregister(&whci_hc_driver);
+}
+module_exit(whci_hc_driver_exit);
+
+/* PCI device IDs that we handle (so it gets loaded) */
+static struct pci_device_id __used whci_hcd_id_table[] = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) },
+ { /* empty last entry */ }
+};
+MODULE_DEVICE_TABLE(pci, whci_hcd_id_table);
+
+MODULE_DESCRIPTION("WHCI Wireless USB host controller driver");
+MODULE_AUTHOR("Cambridge Silicon Radio Ltd.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/host/whci/hw.c b/drivers/staging/wusbcore/host/whci/hw.c
new file mode 100644
index 000000000000..e4e8914abf42
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/hw.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) hardware access helpers.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val)
+{
+ unsigned long flags;
+ u32 cmd;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ cmd = le_readl(whc->base + WUSBCMD);
+ cmd = (cmd & ~mask) | val;
+ le_writel(cmd, whc->base + WUSBCMD);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+/**
+ * whc_do_gencmd - start a generic command via the WUSBGENCMDSTS register
+ * @whc: the WHCI HC
+ * @cmd: command to start.
+ * @params: parameters for the command (the WUSBGENCMDPARAMS register value).
+ * @addr: pointer to any data for the command (may be NULL).
+ * @len: length of the data (if any).
+ */
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len)
+{
+ unsigned long flags;
+ dma_addr_t dma_addr;
+ int t;
+ int ret = 0;
+
+ mutex_lock(&whc->mutex);
+
+ /* Wait for previous command to complete. */
+ t = wait_event_timeout(whc->cmd_wq,
+ (le_readl(whc->base + WUSBGENCMDSTS) & WUSBGENCMDSTS_ACTIVE) == 0,
+ WHC_GENCMD_TIMEOUT_MS);
+ if (t == 0) {
+ dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
+ le_readl(whc->base + WUSBGENCMDSTS),
+ le_readl(whc->base + WUSBGENCMDPARAMS));
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (addr) {
+ memcpy(whc->gen_cmd_buf, addr, len);
+ dma_addr = whc->gen_cmd_buf_dma;
+ } else
+ dma_addr = 0;
+
+ /* Poke registers to start cmd. */
+ spin_lock_irqsave(&whc->lock, flags);
+
+ le_writel(params, whc->base + WUSBGENCMDPARAMS);
+ le_writeq(dma_addr, whc->base + WUSBGENADDR);
+
+ le_writel(WUSBGENCMDSTS_ACTIVE | WUSBGENCMDSTS_IOC | cmd,
+ whc->base + WUSBGENCMDSTS);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+out:
+ mutex_unlock(&whc->mutex);
+
+ return ret;
+}
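+
+/*
+ * Usage sketch (hypothetical command constant, for illustration only):
+ *
+ *	ret = whc_do_gencmd(whc, SOME_WUSBGENCMD, params, &data, sizeof(data));
+ *	if (ret < 0)
+ *		dev_err(&whc->umc->dev, "command failed: %d\n", ret);
+ *
+ * Any data passed in is copied to the gen_cmd_buf bounce buffer, so
+ * @addr may point at stack or temporary storage.
+ */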
+
+/**
+ * whc_hw_error - recover from a hardware error
+ * @whc: the WHCI HC that broke.
+ * @reason: a description of the failure.
+ *
+ * Recover from broken hardware with a full reset.
+ */
+void whc_hw_error(struct whc *whc, const char *reason)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+
+ dev_err(&whc->umc->dev, "hardware error: %s\n", reason);
+ wusbhc_reset_all(wusbhc);
+}
diff --git a/drivers/staging/wusbcore/host/whci/init.c b/drivers/staging/wusbcore/host/whci/init.c
new file mode 100644
index 000000000000..55fd458a8f30
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/init.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) initialization.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+/*
+ * Reset the host controller.
+ */
+static void whc_hw_reset(struct whc *whc)
+{
+ le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
+ 100, "reset");
+}
+
+static void whc_hw_init_di_buf(struct whc *whc)
+{
+ int d;
+
+ /* Disable all entries in the Device Information buffer. */
+ for (d = 0; d < whc->n_devices; d++)
+ whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;
+
+ le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
+}
+
+static void whc_hw_init_dn_buf(struct whc *whc)
+{
+ /* Clear the Device Notification buffer to ensure the V (valid)
+ * bits are clear. */
+ memset(whc->dn_buf, 0, 4096);
+
+ le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
+}
+
+int whc_init(struct whc *whc)
+{
+ u32 whcsparams;
+ int ret, i;
+ resource_size_t start, len;
+
+ spin_lock_init(&whc->lock);
+ mutex_init(&whc->mutex);
+ init_waitqueue_head(&whc->cmd_wq);
+ init_waitqueue_head(&whc->async_list_wq);
+ init_waitqueue_head(&whc->periodic_list_wq);
+ whc->workqueue = alloc_ordered_workqueue(dev_name(&whc->umc->dev), 0);
+ if (whc->workqueue == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ INIT_WORK(&whc->dn_work, whc_dn_work);
+
+ INIT_WORK(&whc->async_work, scan_async_work);
+ INIT_LIST_HEAD(&whc->async_list);
+ INIT_LIST_HEAD(&whc->async_removed_list);
+
+ INIT_WORK(&whc->periodic_work, scan_periodic_work);
+ for (i = 0; i < 5; i++)
+ INIT_LIST_HEAD(&whc->periodic_list[i]);
+ INIT_LIST_HEAD(&whc->periodic_removed_list);
+
+ /* Map HC registers. */
+ start = whc->umc->resource.start;
+ len = whc->umc->resource.end - start + 1;
+ if (!request_mem_region(start, len, "whci-hc")) {
+ dev_err(&whc->umc->dev, "can't request HC region\n");
+ ret = -EBUSY;
+ goto error;
+ }
+ whc->base_phys = start;
+ whc->base = ioremap(start, len);
+ if (!whc->base) {
+ dev_err(&whc->umc->dev, "ioremap\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ whc_hw_reset(whc);
+
+ /* Read maximum number of devices, keys and MMC IEs. */
+ whcsparams = le_readl(whc->base + WHCSPARAMS);
+ whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
+ whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
+ whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);
+
+ dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
+ whc->n_devices, whc->n_keys, whc->n_mmc_ies);
+
+ whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
+ sizeof(struct whc_qset), 64, 0);
+ if (whc->qset_pool == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = asl_init(whc);
+ if (ret < 0)
+ goto error;
+ ret = pzl_init(whc);
+ if (ret < 0)
+ goto error;
+
+ /* Allocate and initialize a buffer for generic commands, the
+ Device Information buffer, and the Device Notification
+ buffer. */
+
+ whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+ &whc->gen_cmd_buf_dma, GFP_KERNEL);
+ if (whc->gen_cmd_buf == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
+ sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+ &whc->dn_buf_dma, GFP_KERNEL);
+ if (!whc->dn_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ whc_hw_init_dn_buf(whc);
+
+ whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
+ sizeof(struct di_buf_entry) * whc->n_devices,
+ &whc->di_buf_dma, GFP_KERNEL);
+ if (!whc->di_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ whc_hw_init_di_buf(whc);
+
+ return 0;
+
+error:
+ whc_clean_up(whc);
+ return ret;
+}
+
+void whc_clean_up(struct whc *whc)
+{
+ resource_size_t len;
+
+ if (whc->di_buf)
+ dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
+ whc->di_buf, whc->di_buf_dma);
+ if (whc->dn_buf)
+ dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
+ whc->dn_buf, whc->dn_buf_dma);
+ if (whc->gen_cmd_buf)
+ dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
+ whc->gen_cmd_buf, whc->gen_cmd_buf_dma);
+
+ pzl_clean_up(whc);
+ asl_clean_up(whc);
+
+ dma_pool_destroy(whc->qset_pool);
+
+ len = resource_size(&whc->umc->resource);
+ if (whc->base)
+ iounmap(whc->base);
+ if (whc->base_phys)
+ release_mem_region(whc->base_phys, len);
+
+ if (whc->workqueue)
+ destroy_workqueue(whc->workqueue);
+}
diff --git a/drivers/staging/wusbcore/host/whci/int.c b/drivers/staging/wusbcore/host/whci/int.c
new file mode 100644
index 000000000000..bdbe35e9366f
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/int.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) interrupt handling.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+static void transfer_done(struct whc *whc)
+{
+ queue_work(whc->workqueue, &whc->async_work);
+ queue_work(whc->workqueue, &whc->periodic_work);
+}
+
+irqreturn_t whc_int_handler(struct usb_hcd *hcd)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(hcd);
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 sts;
+
+ sts = le_readl(whc->base + WUSBSTS);
+ if (!(sts & WUSBSTS_INT_MASK))
+ return IRQ_NONE;
+ le_writel(sts & WUSBSTS_INT_MASK, whc->base + WUSBSTS);
+
+ if (sts & WUSBSTS_GEN_CMD_DONE)
+ wake_up(&whc->cmd_wq);
+
+ if (sts & WUSBSTS_HOST_ERR)
+ dev_err(&whc->umc->dev, "FIXME: host system error\n");
+
+ if (sts & WUSBSTS_ASYNC_SCHED_SYNCED)
+ wake_up(&whc->async_list_wq);
+
+ if (sts & WUSBSTS_PERIODIC_SCHED_SYNCED)
+ wake_up(&whc->periodic_list_wq);
+
+ if (sts & WUSBSTS_DNTS_INT)
+ queue_work(whc->workqueue, &whc->dn_work);
+
+ /*
+ * A transfer completed (see [WHCI] section 4.7.1.2 for when
+ * this occurs).
+ */
+ if (sts & (WUSBSTS_INT | WUSBSTS_ERR_INT))
+ transfer_done(whc);
+
+ return IRQ_HANDLED;
+}
+
+static int process_dn_buf(struct whc *whc)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ struct dn_buf_entry *dn;
+ int processed = 0;
+
+ for (dn = whc->dn_buf; dn < whc->dn_buf + WHC_N_DN_ENTRIES; dn++) {
+ if (dn->status & WHC_DN_STATUS_VALID) {
+ wusbhc_handle_dn(wusbhc, dn->src_addr,
+ (struct wusb_dn_hdr *)dn->dn_data,
+ dn->msg_size);
+ dn->status &= ~WHC_DN_STATUS_VALID;
+ processed++;
+ }
+ }
+ return processed;
+}
+
+void whc_dn_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, dn_work);
+ int processed;
+
+ do {
+ processed = process_dn_buf(whc);
+ } while (processed);
+}
diff --git a/drivers/staging/wusbcore/host/whci/pzl.c b/drivers/staging/wusbcore/host/whci/pzl.c
new file mode 100644
index 000000000000..6dfc075f5798
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/pzl.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) periodic schedule management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb.h>
+
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
+{
+ switch (period) {
+ case 0:
+ whc_qset_set_link_ptr(&whc->pz_list[0], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[2], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[4], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[6], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[8], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[10], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[12], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[14], addr);
+ break;
+ case 1:
+ whc_qset_set_link_ptr(&whc->pz_list[1], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[5], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[9], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[13], addr);
+ break;
+ case 2:
+ whc_qset_set_link_ptr(&whc->pz_list[3], addr);
+ whc_qset_set_link_ptr(&whc->pz_list[11], addr);
+ break;
+ case 3:
+ whc_qset_set_link_ptr(&whc->pz_list[7], addr);
+ break;
+ case 4:
+ whc_qset_set_link_ptr(&whc->pz_list[15], addr);
+ break;
+ }
+}
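+
+/*
+ * Illustrative note: the 16 PZ list slots form an implicit binary
+ * tree.  The head pointers for period p are written to every
+ * 2^(p+1)-th slot starting at slot 2^p - 1:
+ *
+ *	period 0: slots 0, 2, 4, 6, 8, 10, 12, 14
+ *	period 1: slots 1, 5, 9, 13
+ *	period 2: slots 3, 11
+ *	period 3: slot 7
+ *	period 4: slot 15
+ *
+ * Combined with the chaining done in update_pzl_hw_view() below, a
+ * qset with period p ends up reachable from every 2^p-th slot, so
+ * lower-period endpoints are polled more often.
+ */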
+
+/*
+ * Return the 'period' to use for this qset.  The endpoint's minimum
+ * interval is used so that, whatever URBs are submitted, the device is
+ * polled often enough.
+ */
+static int qset_get_period(struct whc *whc, struct whc_qset *qset)
+{
+ uint8_t bInterval = qset->ep->desc.bInterval;
+
+ if (bInterval < 6)
+ bInterval = 6;
+ if (bInterval > 10)
+ bInterval = 10;
+ return bInterval - 6;
+}
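+
+/*
+ * Worked example (illustrative): bInterval 1..6 all map to period 0,
+ * 7 -> 1, 8 -> 2, 9 -> 3, and 10 or more -> 4, so an interrupt
+ * endpoint with bInterval 8 is placed on periodic_list[2].
+ */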
+
+static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
+{
+ int period;
+
+ period = qset_get_period(whc, qset);
+
+ qset_clear(whc, qset);
+ list_move(&qset->list_node, &whc->periodic_list[period]);
+ qset->in_sw_list = true;
+}
+
+static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
+{
+ list_move(&qset->list_node, &whc->periodic_removed_list);
+ qset->in_hw_list = false;
+ qset->in_sw_list = false;
+}
+
+/**
+ * pzl_process_qset - process any recently inactivated or halted qTDs
+ * in a qset.
+ *
+ * After inactive qTDs are removed, new qTDs can be added if the
+ * urb queue still contains URBs.
+ *
+ * Returns the schedule updates required.
+ */
+static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
+{
+ enum whc_update update = 0;
+ uint32_t status = 0;
+
+ while (qset->ntds) {
+ struct whc_qtd *td;
+
+ td = &qset->qtd[qset->td_start];
+ status = le32_to_cpu(td->status);
+
+ /*
+ * Nothing to do with a still active qTD.
+ */
+ if (status & QTD_STS_ACTIVE)
+ break;
+
+ if (status & QTD_STS_HALTED) {
+ /* Ug, an error. */
+ process_halted_qtd(whc, qset, td);
+ /* A halted qTD always triggers an update
+ because the qset was either removed or
+ reactivated. */
+ update |= WHC_UPDATE_UPDATED;
+ goto done;
+ }
+
+ /* Mmm, a completed qTD. */
+ process_inactive_qtd(whc, qset, td);
+ }
+
+ if (!qset->remove)
+ update |= qset_add_qtds(whc, qset);
+
+done:
+ /*
+ * If there are no qTDs in this qset, remove it from the PZL.
+ */
+ if (qset->remove && qset->ntds == 0) {
+ pzl_qset_remove(whc, qset);
+ update |= WHC_UPDATE_REMOVED;
+ }
+
+ return update;
+}
+
+/**
+ * pzl_start - start the periodic schedule
+ * @whc: the WHCI host controller
+ *
+ * The PZL must be valid (e.g., all entries in the list should have
+ * the T bit set).
+ */
+void pzl_start(struct whc *whc)
+{
+ le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+ whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
+ 1000, "start PZL");
+}
+
+/**
+ * pzl_stop - stop the periodic schedule
+ * @whc: the WHCI host controller
+ */
+void pzl_stop(struct whc *whc)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
+ whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
+ WUSBSTS_PERIODIC_SCHED, 0,
+ 1000, "stop PZL");
+}
+
+/**
+ * pzl_update - request a PZL update and wait for the hardware to be synced
+ * @whc: the WHCI HC
+ * @wusbcmd: WUSBCMD value to start the update.
+ *
+ * If the WUSB HC is inactive (i.e., the PZL is stopped) then the
+ * update must be skipped as the hardware may not respond to update
+ * requests.
+ */
+void pzl_update(struct whc *whc, uint32_t wusbcmd)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ long t;
+
+ mutex_lock(&wusbhc->mutex);
+ if (wusbhc->active) {
+ whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+ t = wait_event_timeout(
+ whc->periodic_list_wq,
+ (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0,
+ msecs_to_jiffies(1000));
+ if (t == 0)
+ whc_hw_error(whc, "PZL update timeout");
+ }
+ mutex_unlock(&wusbhc->mutex);
+}
+
+static void update_pzl_hw_view(struct whc *whc)
+{
+ struct whc_qset *qset, *t;
+ int period;
+ u64 tmp_qh = 0;
+
+ for (period = 0; period < 5; period++) {
+ list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+ whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
+ tmp_qh = qset->qset_dma;
+ qset->in_hw_list = true;
+ }
+ update_pzl_pointers(whc, period, tmp_qh);
+ }
+}
+
+/**
+ * scan_periodic_work - scan the PZL for qsets to process.
+ *
+ * Process each qset in the PZL in turn and then signal the WHC that
+ * the PZL has been updated.
+ *
+ * Then start, stop or update the periodic schedule as required.
+ */
+void scan_periodic_work(struct work_struct *work)
+{
+ struct whc *whc = container_of(work, struct whc, periodic_work);
+ struct whc_qset *qset, *t;
+ enum whc_update update = 0;
+ int period;
+
+ spin_lock_irq(&whc->lock);
+
+ for (period = 4; period >= 0; period--) {
+ list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
+ if (!qset->in_hw_list)
+ update |= WHC_UPDATE_ADDED;
+ update |= pzl_process_qset(whc, qset);
+ }
+ }
+
+ if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
+ update_pzl_hw_view(whc);
+
+ spin_unlock_irq(&whc->lock);
+
+ if (update) {
+ uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
+ if (update & WHC_UPDATE_REMOVED)
+ wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
+ pzl_update(whc, wusbcmd);
+ }
+
+ /*
+ * Now that the PZL is updated, complete the removal of any
+ * removed qsets.
+ *
+ * If the qset was to be reset, do so and reinsert it into the
+ * PZL if it has pending transfers.
+ */
+ spin_lock_irq(&whc->lock);
+
+ list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
+ qset_remove_complete(whc, qset);
+ if (qset->reset) {
+ qset_reset(whc, qset);
+ if (!list_empty(&qset->stds)) {
+ qset_insert_in_sw_list(whc, qset);
+ queue_work(whc->workqueue, &whc->periodic_work);
+ }
+ }
+ }
+
+ spin_unlock_irq(&whc->lock);
+}
+
+/**
+ * pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
+ * @whc: the WHCI host controller
+ * @urb: the URB to enqueue
+ * @mem_flags: flags for any memory allocations
+ *
+ * The qset for the endpoint is obtained and the urb queued on to it.
+ *
+ * Work is scheduled to update the hardware's view of the PZL.
+ */
+int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
+ if (err < 0) {
+ spin_unlock_irqrestore(&whc->lock, flags);
+ return err;
+ }
+
+ qset = get_qset(whc, urb, GFP_ATOMIC);
+ if (qset == NULL)
+ err = -ENOMEM;
+ else
+ err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
+ if (!err) {
+ if (!qset->in_sw_list && !qset->remove)
+ qset_insert_in_sw_list(whc, qset);
+ } else
+ usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
+
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ if (!err)
+ queue_work(whc->workqueue, &whc->periodic_work);
+
+ return err;
+}
+
+/**
+ * pzl_urb_dequeue - remove an URB (qset) from the periodic list
+ * @whc: the WHCI host controller
+ * @urb: the URB to dequeue
+ * @status: the current status of the URB
+ *
+ * URBs that do not yet have qTDs can simply be removed from the software
+ * queue, otherwise the qset must be removed so the qTDs can be safely
+ * removed.
+ */
+int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
+{
+ struct whc_urb *wurb = urb->hcpriv;
+ struct whc_qset *qset = wurb->qset;
+ struct whc_std *std, *t;
+ bool has_qtd = false;
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&whc->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb) {
+ if (std->qtd)
+ has_qtd = true;
+ qset_free_std(whc, std);
+ } else
+ std->qtd = NULL; /* so this std is re-added when the qset is */
+ }
+
+ if (has_qtd) {
+ pzl_qset_remove(whc, qset);
+ update_pzl_hw_view(whc);
+ wurb->status = status;
+ wurb->is_async = false;
+ queue_work(whc->workqueue, &wurb->dequeue_work);
+ } else
+ qset_remove_urb(whc, qset, urb, status);
+out:
+ spin_unlock_irqrestore(&whc->lock, flags);
+
+ return ret;
+}
+
+/**
+ * pzl_qset_delete - delete a qset from the PZL
+ */
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ qset->remove = 1;
+ queue_work(whc->workqueue, &whc->periodic_work);
+ qset_delete(whc, qset);
+}
+
+/**
+ * pzl_init - initialize the periodic zone list
+ * @whc: the WHCI host controller
+ */
+int pzl_init(struct whc *whc)
+{
+ int i;
+
+ whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
+ &whc->pz_list_dma, GFP_KERNEL);
+ if (whc->pz_list == NULL)
+ return -ENOMEM;
+
+ /* Set T bit on all elements in PZL. */
+ for (i = 0; i < 16; i++)
+ whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
+
+ le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
+
+ return 0;
+}
+
+/**
+ * pzl_clean_up - free PZL resources
+ * @whc: the WHCI host controller
+ *
+ * The PZL is stopped and empty.
+ */
+void pzl_clean_up(struct whc *whc)
+{
+ if (whc->pz_list)
+ dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
+ whc->pz_list_dma);
+}
diff --git a/drivers/staging/wusbcore/host/whci/qset.c b/drivers/staging/wusbcore/host/whci/qset.c
new file mode 100644
index 000000000000..66459b77dc77
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/qset.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) qset management.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+ dma_addr_t dma;
+
+ qset = dma_pool_zalloc(whc->qset_pool, mem_flags, &dma);
+ if (qset == NULL)
+ return NULL;
+
+ qset->qset_dma = dma;
+ qset->whc = whc;
+
+ INIT_LIST_HEAD(&qset->list_node);
+ INIT_LIST_HEAD(&qset->stds);
+
+ return qset;
+}
+
+/**
+ * qset_fill_qh - fill the static endpoint state in a qset's QHead
+ * @whc: the WHCI host controller
+ * @qset: the qset whose QH needs initializing with static endpoint
+ *        state
+ * @urb: an urb for a transfer to this endpoint
+ */
+static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+ struct usb_wireless_ep_comp_descriptor *epcd;
+ bool is_out;
+ uint8_t phy_rate;
+
+ is_out = usb_pipeout(urb->pipe);
+
+ qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
+
+ epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
+ if (epcd) {
+ qset->max_seq = epcd->bMaxSequence;
+ qset->max_burst = epcd->bMaxBurst;
+ } else {
+ qset->max_seq = 2;
+ qset->max_burst = 1;
+ }
+
+ /*
+ * Initial PHY rate is 53.3 Mbit/s for control endpoints or
+ * the maximum supported by the device for other endpoints
+ * (unless limited by the user).
+ */
+ if (usb_pipecontrol(urb->pipe))
+ phy_rate = UWB_PHY_RATE_53;
+ else {
+ uint16_t phy_rates;
+
+ phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
+ phy_rate = fls(phy_rates) - 1;
+ if (phy_rate > whc->wusbhc.phy_rate)
+ phy_rate = whc->wusbhc.phy_rate;
+ }
+
+ qset->qh.info1 = cpu_to_le32(
+ QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
+ | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
+ | usb_pipe_to_qh_type(urb->pipe)
+ | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
+ | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
+ );
+ qset->qh.info2 = cpu_to_le32(
+ QH_INFO2_BURST(qset->max_burst)
+ | QH_INFO2_DBP(0)
+ | QH_INFO2_MAX_COUNT(3)
+ | QH_INFO2_MAX_RETRY(3)
+ | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
+ );
+ /* FIXME: where can we obtain these Tx parameters from? Why
+ * doesn't the chip know what Tx power to use? It knows the Rx
+ * strength and can presumably guess the Tx power required
+ * from that? */
+ qset->qh.info3 = cpu_to_le32(
+ QH_INFO3_TX_RATE(phy_rate)
+ | QH_INFO3_TX_PWR(0) /* 0 == max power */
+ );
+
+ qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
+}
+
+/**
+ * qset_clear - clear fields in a qset so it may be reinserted into a
+ * schedule.
+ *
+ * The sequence number and current window are not cleared (see
+ * qset_reset()).
+ */
+void qset_clear(struct whc *whc, struct whc_qset *qset)
+{
+ qset->td_start = qset->td_end = qset->ntds = 0;
+
+ qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
+ qset->qh.err_count = 0;
+ qset->qh.scratch[0] = 0;
+ qset->qh.scratch[1] = 0;
+ qset->qh.scratch[2] = 0;
+
+ memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));
+
+ init_completion(&qset->remove_complete);
+}
+
+/**
+ * qset_reset - reset endpoint state in a qset.
+ *
+ * Clears the sequence number and current window. This qset must not
+ * be in the ASL or PZL.
+ */
+void qset_reset(struct whc *whc, struct whc_qset *qset)
+{
+ qset->reset = 0;
+
+ qset->qh.status &= ~QH_STATUS_SEQ_MASK;
+ qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
+}
+
+/**
+ * get_qset - get the qset for an async endpoint
+ *
+ * A new qset is created if one does not already exist.
+ */
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct whc_qset *qset;
+
+ qset = urb->ep->hcpriv;
+ if (qset == NULL) {
+ qset = qset_alloc(whc, mem_flags);
+ if (qset == NULL)
+ return NULL;
+
+ qset->ep = urb->ep;
+ urb->ep->hcpriv = qset;
+ qset_fill_qh(whc, qset, urb);
+ }
+ return qset;
+}
+
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
+{
+ qset->remove = 0;
+ list_del_init(&qset->list_node);
+ complete(&qset->remove_complete);
+}
+
+/**
+ * qset_add_qtds - add qTDs for an URB to a qset
+ *
+ * Returns true if the list (ASL/PZL) must be updated because (for a
+ * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
+ */
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
+{
+ struct whc_std *std;
+ enum whc_update update = 0;
+
+ list_for_each_entry(std, &qset->stds, list_node) {
+ struct whc_qtd *qtd;
+ uint32_t status;
+
+ if (qset->ntds >= WHCI_QSET_TD_MAX
+ || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
+ break;
+
+ if (std->qtd)
+ continue; /* already has a qTD */
+
+ qtd = std->qtd = &qset->qtd[qset->td_end];
+
+ /* Fill in setup bytes for control transfers. */
+ if (usb_pipecontrol(std->urb->pipe))
+ memcpy(qtd->setup, std->urb->setup_packet, 8);
+
+ status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);
+
+ if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
+ status |= QTD_STS_LAST_PKT;
+
+ /*
+ * For an IN transfer the iAlt field should be set so
+ * the h/w will automatically advance to the next
+ * transfer. However, if there are 8 or more TDs
+ * remaining in this transfer then iAlt cannot be set
+ * as it could point to somewhere in this transfer.
+ */
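+ /*
+ * Worked example (illustrative): with td_end = 5 and 3 TDs
+ * still to queue, iAlt = (5 + 3) % 8 = 0, i.e. the slot just
+ * after the last qTD of this transfer in the WHCI_QSET_TD_MAX
+ * entry ring.
+ */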
+ if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
+ int ialt;
+ ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
+ status |= QTD_STS_IALT(ialt);
+ } else if (usb_pipein(std->urb->pipe))
+ qset->pause_after_urb = std->urb;
+
+ if (std->num_pointers)
+ qtd->options = cpu_to_le32(QTD_OPT_IOC);
+ else
+ qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
+ qtd->page_list_ptr = cpu_to_le64(std->dma_addr);
+
+ qtd->status = cpu_to_le32(status);
+
+ if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
+ update = WHC_UPDATE_UPDATED;
+
+ if (++qset->td_end >= WHCI_QSET_TD_MAX)
+ qset->td_end = 0;
+ qset->ntds++;
+ }
+
+ return update;
+}
+
+/**
+ * qset_remove_qtd - remove the first qTD from a qset.
+ *
+ * The qTD might still be active (if it's part of an IN URB that
+ * resulted in a short read) so ensure it's deactivated.
+ */
+static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
+{
+ qset->qtd[qset->td_start].status = 0;
+
+ if (++qset->td_start >= WHCI_QSET_TD_MAX)
+ qset->td_start = 0;
+ qset->ntds--;
+}
+
+static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
+{
+ struct scatterlist *sg;
+ void *bounce;
+ size_t remaining, offset;
+
+ bounce = std->bounce_buf;
+ remaining = std->len;
+
+ sg = std->bounce_sg;
+ offset = std->bounce_offset;
+
+ while (remaining) {
+ size_t len;
+
+ len = min(sg->length - offset, remaining);
+ memcpy(sg_virt(sg) + offset, bounce, len);
+
+ bounce += len;
+ remaining -= len;
+
+ offset += len;
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ }
+
+}
+
+/**
+ * qset_free_std - remove an sTD and free it.
+ * @whc: the WHCI host controller
+ * @std: the sTD to remove and free.
+ */
+void qset_free_std(struct whc *whc, struct whc_std *std)
+{
+ list_del(&std->list_node);
+ if (std->bounce_buf) {
+ bool is_out = usb_pipeout(std->urb->pipe);
+ dma_addr_t dma_addr;
+
+ if (std->num_pointers)
+ dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
+ else
+ dma_addr = std->dma_addr;
+
+ dma_unmap_single(whc->wusbhc.dev, dma_addr,
+ std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!is_out)
+ qset_copy_bounce_to_sg(whc, std);
+ kfree(std->bounce_buf);
+ }
+ if (std->pl_virt) {
+ if (!dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
+ dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
+ std->num_pointers * sizeof(struct whc_page_list_entry),
+ DMA_TO_DEVICE);
+ kfree(std->pl_virt);
+ std->pl_virt = NULL;
+ }
+ kfree(std);
+}
+
+/**
+ * qset_remove_qtds - remove an URB's qTDs (and sTDs).
+ */
+static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb)
+{
+ struct whc_std *std, *t;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb != urb)
+ break;
+ if (std->qtd != NULL)
+ qset_remove_qtd(whc, qset);
+ qset_free_std(whc, std);
+ }
+}
+
+/**
+ * qset_free_stds - free any remaining sTDs for an URB.
+ */
+static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
+{
+ struct whc_std *std, *t;
+
+ list_for_each_entry_safe(std, t, &qset->stds, list_node) {
+ if (std->urb == urb)
+ qset_free_std(qset->whc, std);
+ }
+}
+
+static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
+{
+ dma_addr_t dma_addr = std->dma_addr;
+ dma_addr_t sp, ep;
+ size_t pl_len;
+ int p;
+
+ /* Short buffers don't need a page list. */
+ if (std->len <= WHCI_PAGE_SIZE) {
+ std->num_pointers = 0;
+ return 0;
+ }
+
+ sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+ ep = dma_addr + std->len;
+ std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
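+ /*
+ * Example (illustrative numbers): a 6000 byte buffer starting
+ * 0x100 bytes into a page gives ep - sp = 0x1870, so
+ * DIV_ROUND_UP(0x1870, 0x1000) = 2 page list entries.
+ */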
+
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+ std->pl_virt = kmalloc(pl_len, mem_flags);
+ if (std->pl_virt == NULL)
+ return -ENOMEM;
+ std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
+ kfree(std->pl_virt);
+ return -EFAULT;
+ }
+
+ for (p = 0; p < std->num_pointers; p++) {
+ std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+ dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
+ }
+
+ return 0;
+}
+
+/**
+ * urb_dequeue_work - execute the ASL/PZL update and give the urb back to the system.
+ */
+static void urb_dequeue_work(struct work_struct *work)
+{
+ struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
+ struct whc_qset *qset = wurb->qset;
+ struct whc *whc = qset->whc;
+ unsigned long flags;
+
+ if (wurb->is_async)
+ asl_update(whc, WUSBCMD_ASYNC_UPDATED
+ | WUSBCMD_ASYNC_SYNCED_DB
+ | WUSBCMD_ASYNC_QSET_RM);
+ else
+ pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
+ | WUSBCMD_PERIODIC_SYNCED_DB
+ | WUSBCMD_PERIODIC_QSET_RM);
+
+ spin_lock_irqsave(&whc->lock, flags);
+ qset_remove_urb(whc, qset, wurb->urb, wurb->status);
+ spin_unlock_irqrestore(&whc->lock, flags);
+}
+
+static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct whc_std *std;
+
+ std = kzalloc(sizeof(struct whc_std), mem_flags);
+ if (std == NULL)
+ return NULL;
+
+ std->urb = urb;
+ std->qtd = NULL;
+
+ INIT_LIST_HEAD(&std->list_node);
+ list_add_tail(&std->list_node, &qset->stds);
+
+ return std;
+}
+
+static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags)
+{
+ size_t remaining;
+ struct scatterlist *sg;
+ int i;
+ int ntds = 0;
+ struct whc_std *std = NULL;
+ struct whc_page_list_entry *new_pl_virt;
+ dma_addr_t prev_end = 0;
+ size_t pl_len;
+ int p = 0;
+
+ remaining = urb->transfer_buffer_length;
+
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+ dma_addr_t dma_addr;
+ size_t dma_remaining;
+ dma_addr_t sp, ep;
+ int num_pointers;
+
+ if (remaining == 0) {
+ break;
+ }
+
+ dma_addr = sg_dma_address(sg);
+ dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
+
+ while (dma_remaining) {
+ size_t dma_len;
+
+ /*
+ * We can use the previous std (if it exists) provided that:
+ * - the previous one ended on a page boundary.
+ * - the current one begins on a page boundary.
+ * - the previous one isn't full.
+ *
+ * If a new std is needed but the previous one
+ * was not a whole number of packets then this
+ * sg list cannot be mapped onto multiple
+ * qTDs. Return an error and let the caller
+ * sort it out.
+ */
+ if (!std
+ || (prev_end & (WHCI_PAGE_SIZE-1))
+ || (dma_addr & (WHCI_PAGE_SIZE-1))
+ || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
+ if (std && std->len % qset->max_packet != 0)
+ return -EINVAL;
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL) {
+ return -ENOMEM;
+ }
+ ntds++;
+ p = 0;
+ }
+
+ dma_len = dma_remaining;
+
+ /*
+ * If the remainder of this element doesn't
+ * fit in a single qTD, limit the qTD to a
+ * whole number of packets. This allows the
+ * remainder to go into the next qTD.
+ */
+ if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
+ dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
+ * qset->max_packet - std->len;
+ }
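+ /*
+ * For example (illustrative): with max_packet = 512 a qTD holds
+ * at most (1048575 / 512) * 512 = 1048064 bytes, so dma_len is
+ * trimmed to 1048064 - std->len here.
+ */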
+
+ std->len += dma_len;
+ std->ntds_remaining = -1; /* filled in later */
+
+ sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
+ ep = dma_addr + dma_len;
+ num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
+ std->num_pointers += num_pointers;
+
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+
+ new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
+ if (new_pl_virt == NULL) {
+ kfree(std->pl_virt);
+ std->pl_virt = NULL;
+ return -ENOMEM;
+ }
+ std->pl_virt = new_pl_virt;
+
+ for (; p < std->num_pointers; p++) {
+ std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+ dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
+ }
+
+ prev_end = dma_addr = ep;
+ dma_remaining -= dma_len;
+ remaining -= dma_len;
+ }
+ }
+
+ /* Now that the number of sTDs is known, go back and fill in
+ std->ntds_remaining. */
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (std->ntds_remaining == -1) {
+ pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
+ std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
+ pl_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr))
+ return -EFAULT;
+ std->ntds_remaining = ntds--;
+ }
+ }
+ return 0;
+}
+
+/**
+ * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
+ *
+ * If the URB contains an sg list whose elements cannot be directly
+ * mapped to qTDs then the data must be transferred via bounce
+ * buffers.
+ */
+static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, gfp_t mem_flags)
+{
+ bool is_out = usb_pipeout(urb->pipe);
+ size_t max_std_len;
+ size_t remaining;
+ int ntds = 0;
+ struct whc_std *std = NULL;
+ void *bounce = NULL;
+ struct scatterlist *sg;
+ int i;
+
+ /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
+ max_std_len = qset->max_burst * qset->max_packet;
+
+ remaining = urb->transfer_buffer_length;
+
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
+ size_t len;
+ size_t sg_remaining;
+ void *orig;
+
+ if (remaining == 0) {
+ break;
+ }
+
+ sg_remaining = min_t(size_t, remaining, sg->length);
+ orig = sg_virt(sg);
+
+ while (sg_remaining) {
+ if (!std || std->len == max_std_len) {
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL)
+ return -ENOMEM;
+ std->bounce_buf = kmalloc(max_std_len, mem_flags);
+ if (std->bounce_buf == NULL)
+ return -ENOMEM;
+ std->bounce_sg = sg;
+ std->bounce_offset = orig - sg_virt(sg);
+ bounce = std->bounce_buf;
+ ntds++;
+ }
+
+ len = min(sg_remaining, max_std_len - std->len);
+
+ if (is_out)
+ memcpy(bounce, orig, len);
+
+ std->len += len;
+ std->ntds_remaining = -1; /* filled in later */
+
+ bounce += len;
+ orig += len;
+ sg_remaining -= len;
+ remaining -= len;
+ }
+ }
+
+ /*
+ * For each of the new sTDs, map the bounce buffers, create
+ * page lists (if necessary), and fill in std->ntds_remaining.
+ */
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (std->ntds_remaining != -1)
+ continue;
+
+ std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
+ is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (dma_mapping_error(&whc->umc->dev, std->dma_addr))
+ return -EFAULT;
+
+ if (qset_fill_page_list(whc, std, mem_flags) < 0)
+ return -ENOMEM;
+
+ std->ntds_remaining = ntds--;
+ }
+
+ return 0;
+}
+
+/**
+ * qset_add_urb - add an urb to the qset's queue.
+ *
+ * The URB is chopped into sTDs, one for each qTD that will be required.
+ * At least one qTD (and sTD) is required even if the transfer has no
+ * data (e.g., for some control transfers).
+ */
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct whc_urb *wurb;
+ int remaining = urb->transfer_buffer_length;
+ u64 transfer_dma = urb->transfer_dma;
+ int ntds_remaining;
+ int ret;
+
+ wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
+ if (wurb == NULL)
+ goto err_no_mem;
+ urb->hcpriv = wurb;
+ wurb->qset = qset;
+ wurb->urb = urb;
+ INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
+
+ if (urb->num_sgs) {
+ ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
+ if (ret == -EINVAL) {
+ qset_free_stds(qset, urb);
+ ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
+ }
+ if (ret < 0)
+ goto err_no_mem;
+ return 0;
+ }
+
+ ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
+ if (ntds_remaining == 0)
+ ntds_remaining = 1;
+
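+ /*
+ * Example (illustrative): a 3 MiB transfer needs
+ * DIV_ROUND_UP(3145728, 1048575) = 4 sTDs of up to
+ * QTD_MAX_XFER_SIZE bytes each; the loop below creates them.
+ */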
+ while (ntds_remaining) {
+ struct whc_std *std;
+ size_t std_len;
+
+ std_len = remaining;
+ if (std_len > QTD_MAX_XFER_SIZE)
+ std_len = QTD_MAX_XFER_SIZE;
+
+ std = qset_new_std(whc, qset, urb, mem_flags);
+ if (std == NULL)
+ goto err_no_mem;
+
+ std->dma_addr = transfer_dma;
+ std->len = std_len;
+ std->ntds_remaining = ntds_remaining;
+
+ if (qset_fill_page_list(whc, std, mem_flags) < 0)
+ goto err_no_mem;
+
+ ntds_remaining--;
+ remaining -= std_len;
+ transfer_dma += std_len;
+ }
+
+ return 0;
+
+err_no_mem:
+ qset_free_stds(qset, urb);
+ return -ENOMEM;
+}
+
+/**
+ * qset_remove_urb - remove an URB from the urb queue.
+ *
+ * The URB is returned to the USB subsystem.
+ */
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, int status)
+{
+ struct wusbhc *wusbhc = &whc->wusbhc;
+ struct whc_urb *wurb = urb->hcpriv;
+
+ usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
+ /* Drop the lock as urb->complete() may enqueue another urb. */
+ spin_unlock(&whc->lock);
+ wusbhc_giveback_urb(wusbhc, urb, status);
+ spin_lock(&whc->lock);
+
+ kfree(wurb);
+}
+
+/**
+ * get_urb_status_from_qtd - get the completed urb status from qTD status
+ * @urb: completed urb
+ * @status: qTD status
+ */
+static int get_urb_status_from_qtd(struct urb *urb, u32 status)
+{
+ if (status & QTD_STS_HALTED) {
+ if (status & QTD_STS_DBE)
+ return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
+ else if (status & QTD_STS_BABBLE)
+ return -EOVERFLOW;
+ else if (status & QTD_STS_RCE)
+ return -ETIME;
+ return -EPIPE;
+ }
+ if (usb_pipein(urb->pipe)
+ && (urb->transfer_flags & URB_SHORT_NOT_OK)
+ && urb->actual_length < urb->transfer_buffer_length)
+ return -EREMOTEIO;
+ return 0;
+}
+
+/**
+ * process_inactive_qtd - process an inactive (but not halted) qTD.
+ *
+ * Update the urb with the number of bytes transferred by the qTD. If
+ * the urb is completely transferred, or (for IN transfers only) the
+ * Last Packet Flag (LPF) is set, then the transfer is complete and
+ * the urb should be returned to the system.
+ */
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd)
+{
+ struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+ struct urb *urb = std->urb;
+ uint32_t status;
+ bool complete;
+
+ status = le32_to_cpu(qtd->status);
+
+ urb->actual_length += std->len - QTD_STS_TO_LEN(status);
+
+ if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
+ complete = true;
+ else
+ complete = whc_std_last(std);
+
+ qset_remove_qtd(whc, qset);
+ qset_free_std(whc, std);
+
+ /*
+ * Transfers for this URB are complete? Then return it to the
+ * USB subsystem.
+ */
+ if (complete) {
+ qset_remove_qtds(whc, qset, urb);
+ qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));
+
+ /*
+ * If iAlt isn't valid then the hardware didn't
+ * advance iCur. Adjust the start and end pointers to
+ * match iCur.
+ */
+ if (!(status & QTD_STS_IALT_VALID))
+ qset->td_start = qset->td_end
+ = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
+ qset->pause_after_urb = NULL;
+ }
+}
+
+/**
+ * process_halted_qtd - process a qset with a halted qtd
+ *
+ * Remove all the qTDs for the failed URB and return the failed URB to
+ * the USB subsystem. Then remove all other qTDs so the qset can be
+ * removed.
+ *
+ * FIXME: this is the point where rate adaptation can be done. If a
+ * transfer failed because it exceeded the maximum number of retries
+ * then it could be reactivated with a slower rate without having to
+ * remove the qset.
+ */
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd)
+{
+ struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
+ struct urb *urb = std->urb;
+ int urb_status;
+
+ urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));
+
+ qset_remove_qtds(whc, qset, urb);
+ qset_remove_urb(whc, qset, urb, urb_status);
+
+ list_for_each_entry(std, &qset->stds, list_node) {
+ if (qset->ntds == 0)
+ break;
+ qset_remove_qtd(whc, qset);
+ std->qtd = NULL;
+ }
+
+ qset->remove = 1;
+}
+
+void qset_free(struct whc *whc, struct whc_qset *qset)
+{
+ dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
+}
+
+/**
+ * qset_delete - wait for a qset to be unused, then free it.
+ */
+void qset_delete(struct whc *whc, struct whc_qset *qset)
+{
+ wait_for_completion(&qset->remove_complete);
+ qset_free(whc, qset);
+}
diff --git a/drivers/staging/wusbcore/host/whci/whcd.h b/drivers/staging/wusbcore/host/whci/whcd.h
new file mode 100644
index 000000000000..a442a2589e83
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/whcd.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wireless Host Controller (WHC) private header.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#ifndef __WHCD_H
+#define __WHCD_H
+
+#include <linux/workqueue.h>
+
+#include "../../../uwb/include/whci.h"
+#include "../../../uwb/include/umc.h"
+#include "whci-hc.h"
+
+/* Generic command timeout. */
+#define WHC_GENCMD_TIMEOUT_MS 100
+
+struct whc_dbg;
+
+struct whc {
+ struct wusbhc wusbhc;
+ struct umc_dev *umc;
+
+ resource_size_t base_phys;
+ void __iomem *base;
+ int irq;
+
+ u8 n_devices;
+ u8 n_keys;
+ u8 n_mmc_ies;
+
+ u64 *pz_list;
+ struct dn_buf_entry *dn_buf;
+ struct di_buf_entry *di_buf;
+ dma_addr_t pz_list_dma;
+ dma_addr_t dn_buf_dma;
+ dma_addr_t di_buf_dma;
+
+ spinlock_t lock;
+ struct mutex mutex;
+
+ void * gen_cmd_buf;
+ dma_addr_t gen_cmd_buf_dma;
+ wait_queue_head_t cmd_wq;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct dn_work;
+
+ struct dma_pool *qset_pool;
+
+ struct list_head async_list;
+ struct list_head async_removed_list;
+ wait_queue_head_t async_list_wq;
+ struct work_struct async_work;
+
+ struct list_head periodic_list[5];
+ struct list_head periodic_removed_list;
+ wait_queue_head_t periodic_list_wq;
+ struct work_struct periodic_work;
+
+ struct whc_dbg *dbg;
+};
+
+#define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
+
+/**
+ * struct whc_std - a software TD.
+ * @urb: the URB this sTD is for.
+ * @len: the length of data in the associated TD.
+ * @ntds_remaining: number of TDs (starting from this one) in this transfer.
+ *
+ * @bounce_buf: a bounce buffer if the std was from an urb with a sg
+ * list that could not be mapped to qTDs directly.
+ * @bounce_sg: the first scatterlist element bounce_buf is for.
+ * @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
+ *
+ * Queued URBs may require more TDs than are available in a qset so we
+ * use a list of these "software TDs" (sTDs) to hold per-TD data.
+ */
+struct whc_std {
+ struct urb *urb;
+ size_t len;
+ int ntds_remaining;
+ struct whc_qtd *qtd;
+
+ struct list_head list_node;
+ int num_pointers;
+ dma_addr_t dma_addr;
+ struct whc_page_list_entry *pl_virt;
+
+ void *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned bounce_offset;
+};
+
+/**
+ * struct whc_urb - per URB host controller structure.
+ * @urb: the URB this struct is for.
+ * @qset: the qset associated to the URB.
+ * @dequeue_work: the work to remove the URB when dequeued.
+ * @is_async: whether the URB belongs to the async scheduler.
+ * @status: the status to be returned when calling wusbhc_giveback_urb.
+ */
+struct whc_urb {
+ struct urb *urb;
+ struct whc_qset *qset;
+ struct work_struct dequeue_work;
+ bool is_async;
+ int status;
+};
+
+/**
+ * whc_std_last - is this sTD the URB's last?
+ * @std: the sTD to check.
+ */
+static inline bool whc_std_last(struct whc_std *std)
+{
+ return std->ntds_remaining <= 1;
+}
+
+enum whc_update {
+ WHC_UPDATE_ADDED = 0x01,
+ WHC_UPDATE_REMOVED = 0x02,
+ WHC_UPDATE_UPDATED = 0x04,
+};
+
+/* init.c */
+int whc_init(struct whc *whc);
+void whc_clean_up(struct whc *whc);
+
+/* hw.c */
+void whc_write_wusbcmd(struct whc *whc, u32 mask, u32 val);
+int whc_do_gencmd(struct whc *whc, u32 cmd, u32 params, void *addr, size_t len);
+void whc_hw_error(struct whc *whc, const char *reason);
+
+/* wusb.c */
+int whc_wusbhc_start(struct wusbhc *wusbhc);
+void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay);
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ u8 handle, struct wuie_hdr *wuie);
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm);
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots);
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *ptk, size_t key_size);
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+ const void *gtk, size_t key_size);
+int whc_set_cluster_id(struct whc *whc, u8 bcid);
+
+/* int.c */
+irqreturn_t whc_int_handler(struct usb_hcd *hcd);
+void whc_dn_work(struct work_struct *work);
+
+/* asl.c */
+void asl_start(struct whc *whc);
+void asl_stop(struct whc *whc);
+int asl_init(struct whc *whc);
+void asl_clean_up(struct whc *whc);
+int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void asl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_async_work(struct work_struct *work);
+
+/* pzl.c */
+int pzl_init(struct whc *whc);
+void pzl_clean_up(struct whc *whc);
+void pzl_start(struct whc *whc);
+void pzl_stop(struct whc *whc);
+int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status);
+void pzl_qset_delete(struct whc *whc, struct whc_qset *qset);
+void scan_periodic_work(struct work_struct *work);
+
+/* qset.c */
+struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags);
+void qset_free(struct whc *whc, struct whc_qset *qset);
+struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags);
+void qset_delete(struct whc *whc, struct whc_qset *qset);
+void qset_clear(struct whc *whc, struct whc_qset *qset);
+void qset_reset(struct whc *whc, struct whc_qset *qset);
+int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
+ gfp_t mem_flags);
+void qset_free_std(struct whc *whc, struct whc_std *std);
+void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
+ struct urb *urb, int status);
+void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd);
+void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
+ struct whc_qtd *qtd);
+enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
+void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
+void pzl_update(struct whc *whc, uint32_t wusbcmd);
+void asl_update(struct whc *whc, uint32_t wusbcmd);
+
+/* debug.c */
+void whc_dbg_init(struct whc *whc);
+void whc_dbg_clean_up(struct whc *whc);
+
+#endif /* #ifndef __WHCD_H */
diff --git a/drivers/staging/wusbcore/host/whci/whci-hc.h b/drivers/staging/wusbcore/host/whci/whci-hc.h
new file mode 100644
index 000000000000..5a86a57a80cc
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/whci-hc.h
@@ -0,0 +1,401 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wireless Host Controller (WHC) data structures.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#ifndef _WHCI_WHCI_HC_H
+#define _WHCI_WHCI_HC_H
+
+#include <linux/list.h>
+
+/**
+ * WHCI_PAGE_SIZE - page size used by WHCI
+ *
+ * WHCI assumes that the host system uses pages of 4096 octets.
+ */
+#define WHCI_PAGE_SIZE 4096
+
+
+/**
+ * QTD_MAX_XFER_SIZE - max number of bytes to transfer with a single
+ * qTD.
+ *
+ * This is 2^20 - 1.
+ */
+#define QTD_MAX_XFER_SIZE 1048575
+
+
+/**
+ * struct whc_qtd - Queue Element Transfer Descriptors (qTD)
+ *
+ * This describes the data for a bulk, control or interrupt transfer.
+ *
+ * [WHCI] section 3.2.4
+ */
+struct whc_qtd {
+ __le32 status; /*< remaining transfer len and transfer status */
+ __le32 options;
+ __le64 page_list_ptr; /*< physical pointer to data buffer page list*/
+ __u8 setup[8]; /*< setup data for control transfers */
+} __attribute__((packed));
+
+#define QTD_STS_ACTIVE (1 << 31) /* enable execution of transaction */
+#define QTD_STS_HALTED (1 << 30) /* transfer halted */
+#define QTD_STS_DBE (1 << 29) /* data buffer error */
+#define QTD_STS_BABBLE (1 << 28) /* babble detected */
+#define QTD_STS_RCE (1 << 27) /* retry count exceeded */
+#define QTD_STS_LAST_PKT (1 << 26) /* set Last Packet Flag in WUSB header */
+#define QTD_STS_INACTIVE (1 << 25) /* queue set is marked inactive */
+#define QTD_STS_IALT_VALID (1 << 23) /* iAlt field is valid */
+#define QTD_STS_IALT(i) (QTD_STS_IALT_VALID | ((i) << 20)) /* iAlt field */
+#define QTD_STS_LEN(l) ((l) << 0) /* transfer length */
+#define QTD_STS_TO_LEN(s) ((s) & 0x000fffff)
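+/* The transfer length field is 20 bits wide, hence QTD_MAX_XFER_SIZE == 2^20 - 1 above. */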
+
+#define QTD_OPT_IOC (1 << 1) /* interrupt on complete */
+#define QTD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * struct whc_itd - Isochronous Queue Element Transfer Descriptors (iTD)
+ *
+ * This describes the data and other parameters for an isochronous
+ * transfer.
+ *
+ * [WHCI] section 3.2.5
+ */
+struct whc_itd {
+ __le16 presentation_time; /*< presentation time for OUT transfers */
+ __u8 num_segments; /*< number of data segments in segment list */
+ __u8 status; /*< command execution status */
+ __le32 options; /*< misc transfer options */
+ __le64 page_list_ptr; /*< physical pointer to data buffer page list */
+ __le64 seg_list_ptr; /*< physical pointer to segment list */
+} __attribute__((packed));
+
+#define ITD_STS_ACTIVE (1 << 7) /* enable execution of transaction */
+#define ITD_STS_DBE (1 << 5) /* data buffer error */
+#define ITD_STS_BABBLE (1 << 4) /* babble detected */
+#define ITD_STS_INACTIVE (1 << 1) /* queue set is marked inactive */
+
+#define ITD_OPT_IOC (1 << 1) /* interrupt on complete */
+#define ITD_OPT_SMALL (1 << 0) /* page_list_ptr points to buffer directly */
+
+/**
+ * Page list entry.
+ *
+ * A TD's page list must contain sufficient page list entries for the
+ * total data length in the TD.
+ *
+ * [WHCI] section 3.2.4.3
+ */
+struct whc_page_list_entry {
+ __le64 buf_ptr; /*< physical pointer to buffer */
+} __attribute__((packed));
+
+/**
+ * struct whc_seg_list_entry - Segment list entry.
+ *
+ * Describes a portion of the data buffer described in the containing
+ * qTD's page list.
+ *
+ * seg_ptr = qtd->page_list_ptr[qtd->seg_list_ptr[seg].idx].buf_ptr
+ * + qtd->seg_list_ptr[seg].offset;
+ *
+ * Segments can't cross page boundaries.
+ *
+ * [WHCI] section 3.2.5.5
+ */
+struct whc_seg_list_entry {
+ __le16 len; /*< segment length */
+ __u8 idx; /*< index into page list */
+ __u8 status; /*< segment status */
+ __le16 offset; /*< 12 bit offset into page */
+} __attribute__((packed));
+
+/**
+ * struct whc_qhead - endpoint and status information for a qset.
+ *
+ * [WHCI] section 3.2.6
+ */
+struct whc_qhead {
+ __le64 link; /*< next qset in list */
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le16 status;
+ __le16 err_count; /*< transaction error count */
+ __le32 cur_window;
+ __le32 scratch[3]; /*< h/w scratch area */
+ union {
+ struct whc_qtd qtd;
+ struct whc_itd itd;
+ } overlay;
+} __attribute__((packed));
+
+#define QH_LINK_PTR_MASK (~0x03Full)
+#define QH_LINK_PTR(ptr) ((ptr) & QH_LINK_PTR_MASK)
+#define QH_LINK_IQS (1 << 4) /* isochronous queue set */
+#define QH_LINK_NTDS(n) (((n) - 1) << 1) /* number of TDs in queue set */
+#define QH_LINK_T (1 << 0) /* last queue set in periodic schedule list */
+
+#define QH_INFO1_EP(e) ((e) << 0) /* endpoint number */
+#define QH_INFO1_DIR_IN (1 << 4) /* IN transfer */
+#define QH_INFO1_DIR_OUT (0 << 4) /* OUT transfer */
+#define QH_INFO1_TR_TYPE_CTRL (0x0 << 5) /* control transfer */
+#define QH_INFO1_TR_TYPE_ISOC (0x1 << 5) /* isochronous transfer */
+#define QH_INFO1_TR_TYPE_BULK (0x2 << 5) /* bulk transfer */
+#define QH_INFO1_TR_TYPE_INT (0x3 << 5) /* interrupt */
+#define QH_INFO1_TR_TYPE_LP_INT (0x7 << 5) /* low power interrupt */
+#define QH_INFO1_DEV_INFO_IDX(i) ((i) << 8) /* index into device info buffer */
+#define QH_INFO1_SET_INACTIVE (1 << 15) /* set inactive after transfer */
+#define QH_INFO1_MAX_PKT_LEN(l) ((l) << 16) /* maximum packet length */
+
+#define QH_INFO2_BURST(b) ((b) << 0) /* maximum burst length */
+#define QH_INFO2_DBP(p) ((p) << 5) /* data burst policy (see [WUSB] table 5-7) */
+#define QH_INFO2_MAX_COUNT(c) ((c) << 8) /* max isoc/int pkts per zone */
+#define QH_INFO2_RQS (1 << 15) /* reactivate queue set */
+#define QH_INFO2_MAX_RETRY(r) ((r) << 16) /* maximum transaction retries */
+#define QH_INFO2_MAX_SEQ(s) ((s) << 20) /* maximum sequence number */
+
+#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
+#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
+#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
+#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
+
+#define QH_STATUS_FLOW_CTRL (1 << 15)
+#define QH_STATUS_ICUR(i) ((i) << 5)
+#define QH_STATUS_TO_ICUR(s) (((s) >> 5) & 0x7)
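+/* iCur occupies bits 5..7 of the QH status word: a 3-bit index
+ into the ring of WHCI_QSET_TD_MAX TDs. */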
+#define QH_STATUS_SEQ_MASK 0x1f
+
+/**
+ * usb_pipe_to_qh_type - USB core pipe type to QH transfer type
+ *
+ * Returns the QH type field for a USB core pipe type.
+ */
+static inline unsigned usb_pipe_to_qh_type(unsigned pipe)
+{
+ static const unsigned type[] = {
+ [PIPE_ISOCHRONOUS] = QH_INFO1_TR_TYPE_ISOC,
+ [PIPE_INTERRUPT] = QH_INFO1_TR_TYPE_INT,
+ [PIPE_CONTROL] = QH_INFO1_TR_TYPE_CTRL,
+ [PIPE_BULK] = QH_INFO1_TR_TYPE_BULK,
+ };
+ return type[usb_pipetype(pipe)];
+}
+
+/**
+ * Maximum number of TDs in a qset.
+ */
+#define WHCI_QSET_TD_MAX 8
+
+/**
+ * struct whc_qset - WUSB data transfers to a specific endpoint
+ * @qh: the QHead of this qset
+ * @qtd: up to 8 qTDs (for qsets for control, bulk and interrupt
+ * transfers)
+ * @itd: up to 8 iTDs (for qsets for isochronous transfers)
+ * @qset_dma: DMA address for this qset
+ * @whc: WHCI HC this qset is for
+ * @ep: endpoint
+ * @stds: list of sTDs queued to this qset
+ * @ntds: number of qTDs queued (not necessarily the same as nTDs
+ * field in the QH)
+ * @td_start: index of the first qTD in the list
+ * @td_end: index of next free qTD in the list (provided
+ * ntds < WHCI_QSET_TD_MAX)
+ *
+ * Queue Sets (qsets) are added to the asynchronous schedule list
+ * (ASL) or the periodic zone list (PZL).
+ *
+ * qsets may contain up to 8 TDs (either qTDs or iTDs as appropriate).
+ * Each TD may refer to at most 1 MiB of data. If a single transfer
+ * has > 8MiB of data, TDs can be reused as they are completed since
+ * the TD list is used as a circular buffer. Similarly, several
+ * (smaller) transfers may be queued in a qset.
+ *
+ * WHCI controllers may cache portions of the qsets in the ASL and
+ * PZL, requiring the WHCD to inform the WHC that the lists have been
+ * updated (fields changed or qsets inserted or removed). For safe
+ * insertion and removal of qsets from the lists the schedule must be
+ * stopped to avoid races in updating the QH link pointers.
+ *
+ * Since the HC is free to execute qsets in any order, all transfers
+ * to an endpoint should use the same qset to ensure transfers are
+ * executed in the order they're submitted.
+ *
+ * [WHCI] section 3.2.3
+ */
+struct whc_qset {
+ struct whc_qhead qh;
+ union {
+ struct whc_qtd qtd[WHCI_QSET_TD_MAX];
+ struct whc_itd itd[WHCI_QSET_TD_MAX];
+ };
+
+ /* private data for WHCD */
+ dma_addr_t qset_dma;
+ struct whc *whc;
+ struct usb_host_endpoint *ep;
+ struct list_head stds;
+ int ntds;
+ int td_start;
+ int td_end;
+ struct list_head list_node;
+ unsigned in_sw_list:1;
+ unsigned in_hw_list:1;
+ unsigned remove:1;
+ unsigned reset:1;
+ struct urb *pause_after_urb;
+ struct completion remove_complete;
+ uint16_t max_packet;
+ uint8_t max_burst;
+ uint8_t max_seq;
+};
+
+static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
+{
+ if (target)
+ *ptr = (*ptr & ~(QH_LINK_PTR_MASK | QH_LINK_T)) | QH_LINK_PTR(target);
+ else
+ *ptr = QH_LINK_T;
+}
+
+/**
+ * struct di_buf_entry - Device Information (DI) buffer entry.
+ *
+ * There's one of these per connected device.
+ */
+struct di_buf_entry {
+ __le32 availability_info[8]; /*< MAS availability information, one MAS per bit */
+ __le32 addr_sec_info; /*< addressing and security info */
+ __le32 reserved[7];
+} __attribute__((packed));
+
+#define WHC_DI_SECURE (1 << 31)
+#define WHC_DI_DISABLE (1 << 30)
+#define WHC_DI_KEY_IDX(k) ((k) << 8)
+#define WHC_DI_KEY_IDX_MASK 0x0000ff00
+#define WHC_DI_DEV_ADDR(a) ((a) << 0)
+#define WHC_DI_DEV_ADDR_MASK 0x000000ff
+
+/**
+ * struct dn_buf_entry - Device Notification (DN) buffer entry.
+ *
+ * [WHCI] section 3.2.8
+ */
+struct dn_buf_entry {
+ __u8 msg_size; /*< number of octets of valid DN data */
+ __u8 reserved1;
+ __u8 src_addr; /*< source address */
+ __u8 status; /*< buffer entry status */
+ __le32 tkid; /*< TKID for source device, valid if secure bit is set */
+ __u8 dn_data[56]; /*< up to 56 octets of DN data */
+} __attribute__((packed));
+
+#define WHC_DN_STATUS_VALID (1 << 7) /* buffer entry is valid */
+#define WHC_DN_STATUS_SECURE (1 << 6) /* notification received using secure frame */
+
+#define WHC_N_DN_ENTRIES (4096 / sizeof(struct dn_buf_entry))
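+/* struct dn_buf_entry is 64 octets (1+1+1+1+4+56), so 64 entries fit in the 4 KiB DN buffer. */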
+
+/* The Add MMC IE WUSB Generic Command may take up to 256 bytes of
+ data. [WHCI] section 2.4.7. */
+#define WHC_GEN_CMD_DATA_LEN 256
+
+/*
+ * HC registers.
+ *
+ * [WHCI] section 2.4
+ */
+
+#define WHCIVERSION 0x00
+
+#define WHCSPARAMS 0x04
+# define WHCSPARAMS_TO_N_MMC_IES(p) (((p) >> 16) & 0xff)
+# define WHCSPARAMS_TO_N_KEYS(p) (((p) >> 8) & 0xff)
+# define WHCSPARAMS_TO_N_DEVICES(p) (((p) >> 0) & 0x7f)
+
+#define WUSBCMD 0x08
+# define WUSBCMD_BCID(b) ((b) << 16)
+# define WUSBCMD_BCID_MASK (0xff << 16)
+# define WUSBCMD_ASYNC_QSET_RM (1 << 12)
+# define WUSBCMD_PERIODIC_QSET_RM (1 << 11)
+# define WUSBCMD_WUSBSI(s) ((s) << 8)
+# define WUSBCMD_WUSBSI_MASK (0x7 << 8)
+# define WUSBCMD_ASYNC_SYNCED_DB (1 << 7)
+# define WUSBCMD_PERIODIC_SYNCED_DB (1 << 6)
+# define WUSBCMD_ASYNC_UPDATED (1 << 5)
+# define WUSBCMD_PERIODIC_UPDATED (1 << 4)
+# define WUSBCMD_ASYNC_EN (1 << 3)
+# define WUSBCMD_PERIODIC_EN (1 << 2)
+# define WUSBCMD_WHCRESET (1 << 1)
+# define WUSBCMD_RUN (1 << 0)
+
+#define WUSBSTS 0x0c
+# define WUSBSTS_ASYNC_SCHED (1 << 15)
+# define WUSBSTS_PERIODIC_SCHED (1 << 14)
+# define WUSBSTS_DNTS_SCHED (1 << 13)
+# define WUSBSTS_HCHALTED (1 << 12)
+# define WUSBSTS_GEN_CMD_DONE (1 << 9)
+# define WUSBSTS_CHAN_TIME_ROLLOVER (1 << 8)
+# define WUSBSTS_DNTS_OVERFLOW (1 << 7)
+# define WUSBSTS_BPST_ADJUSTMENT_CHANGED (1 << 6)
+# define WUSBSTS_HOST_ERR (1 << 5)
+# define WUSBSTS_ASYNC_SCHED_SYNCED (1 << 4)
+# define WUSBSTS_PERIODIC_SCHED_SYNCED (1 << 3)
+# define WUSBSTS_DNTS_INT (1 << 2)
+# define WUSBSTS_ERR_INT (1 << 1)
+# define WUSBSTS_INT (1 << 0)
+# define WUSBSTS_INT_MASK 0x3ff
+
+#define WUSBINTR 0x10
+# define WUSBINTR_GEN_CMD_DONE (1 << 9)
+# define WUSBINTR_CHAN_TIME_ROLLOVER (1 << 8)
+# define WUSBINTR_DNTS_OVERFLOW (1 << 7)
+# define WUSBINTR_BPST_ADJUSTMENT_CHANGED (1 << 6)
+# define WUSBINTR_HOST_ERR (1 << 5)
+# define WUSBINTR_ASYNC_SCHED_SYNCED (1 << 4)
+# define WUSBINTR_PERIODIC_SCHED_SYNCED (1 << 3)
+# define WUSBINTR_DNTS_INT (1 << 2)
+# define WUSBINTR_ERR_INT (1 << 1)
+# define WUSBINTR_INT (1 << 0)
+# define WUSBINTR_ALL 0x3ff
+
+#define WUSBGENCMDSTS 0x14
+# define WUSBGENCMDSTS_ACTIVE (1 << 31)
+# define WUSBGENCMDSTS_ERROR (1 << 24)
+# define WUSBGENCMDSTS_IOC (1 << 23)
+# define WUSBGENCMDSTS_MMCIE_ADD 0x01
+# define WUSBGENCMDSTS_MMCIE_RM 0x02
+# define WUSBGENCMDSTS_SET_MAS 0x03
+# define WUSBGENCMDSTS_CHAN_STOP 0x04
+# define WUSBGENCMDSTS_RWP_EN 0x05
+
+#define WUSBGENCMDPARAMS 0x18
+#define WUSBGENADDR 0x20
+#define WUSBASYNCLISTADDR 0x28
+#define WUSBDNTSBUFADDR 0x30
+#define WUSBDEVICEINFOADDR 0x38
+
+#define WUSBSETSECKEYCMD 0x40
+# define WUSBSETSECKEYCMD_SET (1 << 31)
+# define WUSBSETSECKEYCMD_ERASE (1 << 30)
+# define WUSBSETSECKEYCMD_GTK (1 << 8)
+# define WUSBSETSECKEYCMD_IDX(i) ((i) << 0)
+
+#define WUSBTKID 0x44
+#define WUSBSECKEY 0x48
+#define WUSBPERIODICLISTBASE 0x58
+#define WUSBMASINDEX 0x60
+
+#define WUSBDNTSCTRL 0x64
+# define WUSBDNTSCTRL_ACTIVE (1 << 31)
+# define WUSBDNTSCTRL_INTERVAL(i) ((i) << 8)
+# define WUSBDNTSCTRL_SLOTS(s) ((s) << 0)
+
+#define WUSBTIME 0x68
+# define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff
+
+#define WUSBBPST 0x6c
+#define WUSBDIBUPDATED 0x70
+
+#endif /* #ifndef _WHCI_WHCI_HC_H */
diff --git a/drivers/staging/wusbcore/host/whci/wusb.c b/drivers/staging/wusbcore/host/whci/wusb.c
new file mode 100644
index 000000000000..6d0068ab35e4
--- /dev/null
+++ b/drivers/staging/wusbcore/host/whci/wusb.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless Host Controller (WHC) WUSB operations.
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+
+#include "../../../uwb/include/umc.h"
+#include "../../wusbhc.h"
+
+#include "whcd.h"
+
+static int whc_update_di(struct whc *whc, int idx)
+{
+ int offset = idx / 32;
+ u32 bit = 1 << (idx % 32);
+
+ le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
+
+ return whci_wait_for(&whc->umc->dev,
+ whc->base + WUSBDIBUPDATED + offset, bit, 0,
+ 100, "DI update");
+}
+
+/*
+ * WHCI starts MMCs based on there being a valid GTK so these need
+ * only start/stop the asynchronous and periodic schedules and send a
+ * channel stop command.
+ */
+
+int whc_wusbhc_start(struct wusbhc *wusbhc)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ asl_start(whc);
+ pzl_start(whc);
+
+ return 0;
+}
+
+void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 stop_time, now_time;
+ int ret;
+
+ pzl_stop(whc);
+ asl_stop(whc);
+
+ now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK;
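+ /*
+ * delay is in ms; (delay * 8) << 7 == delay * 1024, which appears to
+ * approximate a millisecond to channel-time-tick conversion.
+ */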
+ stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff;
+ ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0);
+ if (ret == 0)
+ msleep(delay);
+}
+
+int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ u8 handle, struct wuie_hdr *wuie)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 params;
+
+ params = (interval << 24)
+ | (repeat_cnt << 16)
+ | (wuie->bLength << 8)
+ | handle;
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_ADD, params, wuie, wuie->bLength);
+}
+
+int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 params;
+
+ params = handle;
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_MMCIE_RM, params, NULL, 0);
+}
+
+int whc_bwa_set(struct wusbhc *wusbhc, s8 stream_index, const struct uwb_mas_bm *mas_bm)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+
+ if (stream_index >= 0)
+ whc_write_wusbcmd(whc, WUSBCMD_WUSBSI_MASK, WUSBCMD_WUSBSI(stream_index));
+
+ return whc_do_gencmd(whc, WUSBGENCMDSTS_SET_MAS, 0, (void *)mas_bm, sizeof(*mas_bm));
+}
+
+int whc_dev_info_set(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int idx = wusb_dev->port_idx;
+ struct di_buf_entry *di = &whc->di_buf[idx];
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ uwb_mas_bm_copy_le(di->availability_info, &wusb_dev->availability);
+ di->addr_sec_info &= ~(WHC_DI_DISABLE | WHC_DI_DEV_ADDR_MASK);
+ di->addr_sec_info |= WHC_DI_DEV_ADDR(wusb_dev->addr);
+
+ ret = whc_update_di(whc, idx);
+
+ mutex_unlock(&whc->mutex);
+
+ return ret;
+}
+
+/*
+ * Set the number of Device Notification Time Slots (DNTS) and enable
+ * device notifications.
+ */
+int whc_set_num_dnts(struct wusbhc *wusbhc, u8 interval, u8 slots)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ u32 dntsctrl;
+
+ dntsctrl = WUSBDNTSCTRL_ACTIVE
+ | WUSBDNTSCTRL_INTERVAL(interval)
+ | WUSBDNTSCTRL_SLOTS(slots);
+
+ le_writel(dntsctrl, whc->base + WUSBDNTSCTRL);
+
+ return 0;
+}
+
+static int whc_set_key(struct whc *whc, u8 key_index, uint32_t tkid,
+ const void *key, size_t key_size, bool is_gtk)
+{
+ uint32_t setkeycmd;
+ uint32_t seckey[4];
+ int i;
+ int ret;
+
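+ /*
+ * WUSB PTKs and GTKs are 128-bit AES-128-CCM keys, so key_size is
+ * expected to be 16 octets, filling the four WUSBSECKEY words
+ * written below.
+ */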
+ memcpy(seckey, key, key_size);
+ setkeycmd = WUSBSETSECKEYCMD_SET | WUSBSETSECKEYCMD_IDX(key_index);
+ if (is_gtk)
+ setkeycmd |= WUSBSETSECKEYCMD_GTK;
+
+ le_writel(tkid, whc->base + WUSBTKID);
+ for (i = 0; i < 4; i++)
+ le_writel(seckey[i], whc->base + WUSBSECKEY + 4*i);
+ le_writel(setkeycmd, whc->base + WUSBSETSECKEYCMD);
+
+ ret = whci_wait_for(&whc->umc->dev, whc->base + WUSBSETSECKEYCMD,
+ WUSBSETSECKEYCMD_SET, 0, 100, "set key");
+
+ return ret;
+}
+
+/**
+ * whc_set_ptk - set the PTK to use for a device.
+ *
+ * The index into the key table for this PTK is the same as the
+ * device's port index.
+ */
+int whc_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
+ const void *ptk, size_t key_size)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ struct di_buf_entry *di = &whc->di_buf[port_idx];
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ if (ptk) {
+ ret = whc_set_key(whc, port_idx, tkid, ptk, key_size, false);
+ if (ret)
+ goto out;
+
+ di->addr_sec_info &= ~WHC_DI_KEY_IDX_MASK;
+ di->addr_sec_info |= WHC_DI_SECURE | WHC_DI_KEY_IDX(port_idx);
+ } else
+ di->addr_sec_info &= ~WHC_DI_SECURE;
+
+ ret = whc_update_di(whc, port_idx);
+out:
+ mutex_unlock(&whc->mutex);
+ return ret;
+}
+
+/**
+ * whc_set_gtk - set the GTK for subsequent broadcast packets
+ *
+ * The GTK is stored in the last entry in the key table (the previous
+ * N_DEVICES entries are for the per-device PTKs).
+ */
+int whc_set_gtk(struct wusbhc *wusbhc, u32 tkid,
+ const void *gtk, size_t key_size)
+{
+ struct whc *whc = wusbhc_to_whc(wusbhc);
+ int ret;
+
+ mutex_lock(&whc->mutex);
+
+ ret = whc_set_key(whc, whc->n_devices, tkid, gtk, key_size, true);
+
+ mutex_unlock(&whc->mutex);
+
+ return ret;
+}
+
+int whc_set_cluster_id(struct whc *whc, u8 bcid)
+{
+ whc_write_wusbcmd(whc, WUSBCMD_BCID_MASK, WUSBCMD_BCID(bcid));
+ return 0;
+}
diff --git a/drivers/staging/wusbcore/include/association.h b/drivers/staging/wusbcore/include/association.h
new file mode 100644
index 000000000000..d7f3cb9b9db5
--- /dev/null
+++ b/drivers/staging/wusbcore/include/association.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wireless USB - Cable Based Association
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ */
+#ifndef __LINUX_USB_ASSOCIATION_H
+#define __LINUX_USB_ASSOCIATION_H
+
+
+/*
+ * Association attributes
+ *
+ * Association Models Supplement to WUSB 1.0 T[3-1]
+ *
+ * Each field in the structures has its ID, its length and then the
+ * value. This is the actual definition of the field's ID and its
+ * length.
+ */
+struct wusb_am_attr {
+ __u8 id;
+ __u8 len;
+};
+
+/* Different fields defined by the spec */
+#define WUSB_AR_AssociationTypeId { .id = cpu_to_le16(0x0000), .len = cpu_to_le16(2) }
+#define WUSB_AR_AssociationSubTypeId { .id = cpu_to_le16(0x0001), .len = cpu_to_le16(2) }
+#define WUSB_AR_Length { .id = cpu_to_le16(0x0002), .len = cpu_to_le16(4) }
+#define WUSB_AR_AssociationStatus { .id = cpu_to_le16(0x0004), .len = cpu_to_le16(4) }
+#define WUSB_AR_LangID { .id = cpu_to_le16(0x0008), .len = cpu_to_le16(2) }
+#define WUSB_AR_DeviceFriendlyName { .id = cpu_to_le16(0x000b), .len = cpu_to_le16(64) } /* max */
+#define WUSB_AR_HostFriendlyName { .id = cpu_to_le16(0x000c), .len = cpu_to_le16(64) } /* max */
+#define WUSB_AR_CHID { .id = cpu_to_le16(0x1000), .len = cpu_to_le16(16) }
+#define WUSB_AR_CDID { .id = cpu_to_le16(0x1001), .len = cpu_to_le16(16) }
+#define WUSB_AR_ConnectionContext { .id = cpu_to_le16(0x1002), .len = cpu_to_le16(48) }
+#define WUSB_AR_BandGroups { .id = cpu_to_le16(0x1004), .len = cpu_to_le16(2) }
+
+/* CBAF Control Requests (AMS1.0[T4-1]) */
+enum {
+ CBAF_REQ_GET_ASSOCIATION_INFORMATION = 0x01,
+ CBAF_REQ_GET_ASSOCIATION_REQUEST,
+ CBAF_REQ_SET_ASSOCIATION_RESPONSE
+};
+
+/*
+ * CBAF USB-interface definitions
+ *
+ * No altsettings, one optional interrupt endpoint.
+ */
+enum {
+ CBAF_IFACECLASS = 0xef,
+ CBAF_IFACESUBCLASS = 0x03,
+ CBAF_IFACEPROTOCOL = 0x01,
+};
+
+/* Association Information (AMS1.0[T4-3]) */
+struct wusb_cbaf_assoc_info {
+ __le16 Length;
+ __u8 NumAssociationRequests;
+ __le16 Flags;
+ __u8 AssociationRequestsArray[];
+} __attribute__((packed));
+
+/* Association Request (AMS1.0[T4-4]) */
+struct wusb_cbaf_assoc_request {
+ __u8 AssociationDataIndex;
+ __u8 Reserved;
+ __le16 AssociationTypeId;
+ __le16 AssociationSubTypeId;
+ __le32 AssociationTypeInfoSize;
+} __attribute__((packed));
+
+enum {
+ AR_TYPE_WUSB = 0x0001,
+ AR_TYPE_WUSB_RETRIEVE_HOST_INFO = 0x0000,
+ AR_TYPE_WUSB_ASSOCIATE = 0x0001,
+};
+
+/* Association Attribute header (AMS1.0[3.8]) */
+struct wusb_cbaf_attr_hdr {
+ __le16 id;
+ __le16 len;
+} __attribute__((packed));
+
+/* Host Info (AMS1.0[T4-7]) (yeah, more headers and fields...) */
+struct wusb_cbaf_host_info {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr CHID_hdr;
+ struct wusb_ckhdid CHID;
+ struct wusb_cbaf_attr_hdr LangID_hdr;
+ __le16 LangID;
+ struct wusb_cbaf_attr_hdr HostFriendlyName_hdr;
+ __u8 HostFriendlyName[];
+} __attribute__((packed));
+
+/* Device Info (AMS1.0[T4-8])
+ *
+ * I still don't get this tag'n'header stuff for each goddamn
+ * field...
+ */
+struct wusb_cbaf_device_info {
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le32 Length;
+ struct wusb_cbaf_attr_hdr CDID_hdr;
+ struct wusb_ckhdid CDID;
+ struct wusb_cbaf_attr_hdr BandGroups_hdr;
+ __le16 BandGroups;
+ struct wusb_cbaf_attr_hdr LangID_hdr;
+ __le16 LangID;
+ struct wusb_cbaf_attr_hdr DeviceFriendlyName_hdr;
+ __u8 DeviceFriendlyName[];
+} __attribute__((packed));
+
+/* Connection Context; CC_DATA - Success case (AMS1.0[T4-9]) */
+struct wusb_cbaf_cc_data {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le32 Length;
+ struct wusb_cbaf_attr_hdr ConnectionContext_hdr;
+ struct wusb_ckhdid CHID;
+ struct wusb_ckhdid CDID;
+ struct wusb_ckhdid CK;
+ struct wusb_cbaf_attr_hdr BandGroups_hdr;
+ __le16 BandGroups;
+} __attribute__((packed));
+
+/* CC_DATA - Failure case (AMS1.0[T4-10]) */
+struct wusb_cbaf_cc_data_fail {
+ struct wusb_cbaf_attr_hdr AssociationTypeId_hdr;
+ __le16 AssociationTypeId;
+ struct wusb_cbaf_attr_hdr AssociationSubTypeId_hdr;
+ __le16 AssociationSubTypeId;
+ struct wusb_cbaf_attr_hdr Length_hdr;
+ __le16 Length;
+ struct wusb_cbaf_attr_hdr AssociationStatus_hdr;
+ __u32 AssociationStatus;
+} __attribute__((packed));
+
+#endif /* __LINUX_USB_ASSOCIATION_H */
diff --git a/drivers/staging/wusbcore/include/wusb-wa.h b/drivers/staging/wusbcore/include/wusb-wa.h
new file mode 100644
index 000000000000..64a840b5106e
--- /dev/null
+++ b/drivers/staging/wusbcore/include/wusb-wa.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wireless USB Wire Adapter constants and structures.
+ *
+ * Copyright (C) 2005-2006 Intel Corporation.
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ *
+ * References:
+ * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
+ */
+#ifndef __LINUX_USB_WUSB_WA_H
+#define __LINUX_USB_WUSB_WA_H
+
+/**
+ * Radio Command Request for the Radio Control Interface
+ *
+ * Radio Control Interface command and event codes are the same as
+ * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
+ */
+enum {
+ WA_EXEC_RC_CMD = 40, /* Radio Control command Request */
+};
+
+/* Wireless Adapter Requests ([WUSB] table 8-51) */
+enum {
+ WUSB_REQ_ADD_MMC_IE = 20,
+ WUSB_REQ_REMOVE_MMC_IE = 21,
+ WUSB_REQ_SET_NUM_DNTS = 22,
+ WUSB_REQ_SET_CLUSTER_ID = 23,
+ WUSB_REQ_SET_DEV_INFO = 24,
+ WUSB_REQ_GET_TIME = 25,
+ WUSB_REQ_SET_STREAM_IDX = 26,
+ WUSB_REQ_SET_WUSB_MAS = 27,
+ WUSB_REQ_CHAN_STOP = 28,
+};
+
+
+/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
+enum {
+ WUSB_TIME_ADJ = 0,
+ WUSB_TIME_BPST = 1,
+ WUSB_TIME_WUSB = 2,
+};
+
+enum {
+ WA_ENABLE = 0x01,
+ WA_RESET = 0x02,
+ RPIPE_PAUSE = 0x1,
+ RPIPE_STALL = 0x2,
+};
+
+/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
+enum {
+ WA_STATUS_ENABLED = 0x01,
+ WA_STATUS_RESETTING = 0x02
+};
+
+enum rpipe_crs {
+ RPIPE_CRS_CTL = 0x01,
+ RPIPE_CRS_ISO = 0x02,
+ RPIPE_CRS_BULK = 0x04,
+ RPIPE_CRS_INTR = 0x08
+};
+
+/**
+ * RPipe descriptor ([WUSB] section 8.5.2.11)
+ *
+ * FIXME: explain rpipes
+ */
+struct usb_rpipe_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 wRPipeIndex;
+ __le16 wRequests;
+ __le16 wBlocks; /* rw if 0 */
+ __le16 wMaxPacketSize; /* rw */
+ union {
+ u8 dwa_bHSHubAddress; /* rw: DWA. */
+ u8 hwa_bMaxBurst; /* rw: HWA. */
+ };
+ union {
+ u8 dwa_bHSHubPort; /* rw: DWA. */
+ u8 hwa_bDeviceInfoIndex; /* rw: HWA. */
+ };
+ u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */
+ union {
+ u8 dwa_bDeviceAddress; /* rw: DWA Target device address. */
+ u8 hwa_reserved; /* rw: HWA. */
+ };
+ u8 bEndpointAddress; /* rw: Target EP address */
+ u8 bDataSequence; /* ro: Current Data sequence */
+ __le32 dwCurrentWindow; /* ro */
+ u8 bMaxDataSequence; /* ro?: max supported seq */
+ u8 bInterval; /* rw: */
+ u8 bOverTheAirInterval; /* rw: */
+ u8 bmAttribute; /* ro? */
+ u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */
+ u8 bmRetryOptions; /* rw? */
+ __le16 wNumTransactionErrors; /* rw */
+} __attribute__ ((packed));
+
+/**
+ * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
+ *
+ * These are the notifications coming on the notification endpoint of
+ * an HWA and a DWA.
+ */
+enum wa_notif_type {
+ DWA_NOTIF_RWAKE = 0x91,
+ DWA_NOTIF_PORTSTATUS = 0x92,
+ WA_NOTIF_TRANSFER = 0x93,
+ HWA_NOTIF_BPST_ADJ = 0x94,
+ HWA_NOTIF_DN = 0x95,
+};
+
+/**
+ * Wire Adapter notification header
+ *
+ * Notifications coming from a wire adapter use a common header
+ * defined in [WUSB] sections 8.4.5 & 8.5.4.
+ */
+struct wa_notif_hdr {
+ u8 bLength;
+ u8 bNotifyType; /* enum wa_notif_type */
+} __packed;
+
+/**
+ * HWA DN Received notification ([WUSB] section 8.5.4.2)
+ *
+ * The DNData is specified in WUSB1.0[7.6]. For each device
+ * notification we receive, we just need to dispatch it.
+ *
+ * @dndata: this is really an array of notifications, but all start
+ * with the same header.
+ */
+struct hwa_notif_dn {
+ struct wa_notif_hdr hdr;
+ u8 bSourceDeviceAddr; /* from errata 2005/07 */
+ u8 bmAttributes;
+ struct wusb_dn_hdr dndata[];
+} __packed;
+
+/* [WUSB] section 8.3.3 */
+enum wa_xfer_type {
+ WA_XFER_TYPE_CTL = 0x80,
+ WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */
+ WA_XFER_TYPE_ISO = 0x82,
+ WA_XFER_RESULT = 0x83,
+ WA_XFER_ABORT = 0x84,
+ WA_XFER_ISO_PACKET_INFO = 0xA0,
+ WA_XFER_ISO_PACKET_STATUS = 0xA1,
+};
+
+/* [WUSB] section 8.3.3 */
+struct wa_xfer_hdr {
+ u8 bLength; /* 0x18 */
+ u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */
+ __le16 wRPipe; /* RPipe index */
+ __le32 dwTransferID; /* Host-assigned ID */
+ __le32 dwTransferLength; /* Length of data to xfer */
+ u8 bTransferSegment;
+} __packed;
+
+struct wa_xfer_ctl {
+ struct wa_xfer_hdr hdr;
+ u8 bmAttribute;
+ __le16 wReserved;
+ struct usb_ctrlrequest baSetupData;
+} __packed;
+
+struct wa_xfer_bi {
+ struct wa_xfer_hdr hdr;
+ u8 bReserved;
+ __le16 wReserved;
+} __packed;
+
+/* [WUSB] section 8.5.5 */
+struct wa_xfer_hwaiso {
+ struct wa_xfer_hdr hdr;
+ u8 bReserved;
+ __le16 wPresentationTime;
+ __le32 dwNumOfPackets;
+} __packed;
+
+struct wa_xfer_packet_info_hwaiso {
+ __le16 wLength;
+ u8 bPacketType;
+ u8 bReserved;
+ __le16 PacketLength[0];
+} __packed;
+
+struct wa_xfer_packet_status_len_hwaiso {
+ __le16 PacketLength;
+ __le16 PacketStatus;
+} __packed;
+
+struct wa_xfer_packet_status_hwaiso {
+ __le16 wLength;
+ u8 bPacketType;
+ u8 bReserved;
+ struct wa_xfer_packet_status_len_hwaiso PacketStatus[0];
+} __packed;
+
+/* [WUSB] section 8.3.3.5 */
+struct wa_xfer_abort {
+ u8 bLength;
+ u8 bRequestType;
+ __le16 wRPipe; /* RPipe index */
+ __le32 dwTransferID; /* Host-assigned ID */
+} __packed;
+
+/**
+ * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
+ *
+ */
+struct wa_notif_xfer {
+ struct wa_notif_hdr hdr;
+ u8 bEndpoint;
+ u8 Reserved;
+} __packed;
+
+/** Transfer result basic codes [WUSB] table 8-15 */
+enum {
+ WA_XFER_STATUS_SUCCESS,
+ WA_XFER_STATUS_HALTED,
+ WA_XFER_STATUS_DATA_BUFFER_ERROR,
+ WA_XFER_STATUS_BABBLE,
+ WA_XFER_RESERVED,
+ WA_XFER_STATUS_NOT_FOUND,
+ WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
+ WA_XFER_STATUS_TRANSACTION_ERROR,
+ WA_XFER_STATUS_ABORTED,
+ WA_XFER_STATUS_RPIPE_NOT_READY,
+ WA_XFER_INVALID_FORMAT,
+ WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
+ WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
+};
+
+/** [WUSB] section 8.3.3.4 */
+struct wa_xfer_result {
+ struct wa_notif_hdr hdr;
+ __le32 dwTransferID;
+ __le32 dwTransferLength;
+ u8 bTransferSegment;
+ u8 bTransferStatus;
+ __le32 dwNumOfPackets;
+} __packed;
+
+/**
+ * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
+ *
+ * NOTE: u16 fields are read Little Endian from the hardware.
+ *
+ * @bNumPorts is the original max number of devices that the host can
+ * connect; we might chop this so the stack can handle
+ * it. In case you need to access it, use wusbhc->ports_max
+ * if it is a Wireless USB WA.
+ */
+struct usb_wa_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ __le16 bcdWAVersion;
+ u8 bNumPorts; /* don't use!! */
+ u8 bmAttributes; /* Reserved == 0 */
+ __le16 wNumRPipes;
+ __le16 wRPipeMaxBlock;
+ u8 bRPipeBlockSize;
+ u8 bPwrOn2PwrGood;
+ u8 bNumMMCIEs;
+ u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
+} __packed;
+
+/**
+ * HWA Device Information Buffer (WUSB1.0[T8.54])
+ */
+struct hwa_dev_info {
+ u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */
+ u8 bDeviceAddress;
+ __le16 wPHYRates;
+ u8 bmDeviceAttribute;
+} __packed;
+
+#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
diff --git a/drivers/staging/wusbcore/include/wusb.h b/drivers/staging/wusbcore/include/wusb.h
new file mode 100644
index 000000000000..09771d1da7bc
--- /dev/null
+++ b/drivers/staging/wusbcore/include/wusb.h
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB Standard Definitions
+ * Event Size Tables
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ *
+ *
+ * FIXME: docs
+ * FIXME: organize properly, group logically
+ *
+ * All the event structures are defined in uwb/spec.h, as they are
+ * common to the WHCI and WUSB radio control interfaces.
+ */
+
+#ifndef __WUSB_H__
+#define __WUSB_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/usb/ch9.h>
+#include <linux/param.h>
+#include "../../uwb/include/spec.h"
+
+/**
+ * WUSB Information Element header
+ *
+ * I don't know why they decided to make it different from the MBOA
+ * MAC IE Header; beats me.
+ */
+struct wuie_hdr {
+ u8 bLength;
+ u8 bIEIdentifier;
+} __attribute__((packed));
+
+enum {
+ WUIE_ID_WCTA = 0x80,
+ WUIE_ID_CONNECTACK,
+ WUIE_ID_HOST_INFO,
+ WUIE_ID_CHANGE_ANNOUNCE,
+ WUIE_ID_DEVICE_DISCONNECT,
+ WUIE_ID_HOST_DISCONNECT,
+ WUIE_ID_KEEP_ALIVE = 0x89,
+ WUIE_ID_ISOCH_DISCARD,
+ WUIE_ID_RESET_DEVICE,
+};
+
+/**
+ * Maximum number of array elements in a WUSB IE.
+ *
+ * WUSB1.0[7.5, before table 7-38] says that WUSB IEs that are
+ * "arrays" have to be limited to 4 elements. So we define it
+ * like that to ease things up and submit only the needed size.
+ */
+#define WUIE_ELT_MAX 4
+
+/**
+ * Wrapper for the data that defines a CHID, a CDID or a CK
+ *
+ * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
+ * data. In order to avoid confusion and enforce types, we wrap it.
+ *
+ * Make it packed, as we use it in some hw definitions.
+ */
+struct wusb_ckhdid {
+ u8 data[16];
+} __attribute__((packed));
+
+static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
+
+#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
+
+/**
+ * WUSB IE: Host Information (WUSB1.0[7.5.2])
+ *
+ * Used to provide information about the host to the Wireless USB
+ * devices in range (CHID can be used as an ASCII string).
+ */
+struct wuie_host_info {
+ struct wuie_hdr hdr;
+ __le16 attributes;
+ struct wusb_ckhdid CHID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
+ *
+ * Used to acknowledge device connect requests. See note for
+ * WUIE_ELT_MAX.
+ */
+struct wuie_connect_ack {
+ struct wuie_hdr hdr;
+ struct {
+ struct wusb_ckhdid CDID;
+ u8 bDeviceAddress; /* 0 means unused */
+ u8 bReserved;
+ } blk[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE Host Information Element, Connect Availability
+ *
+ * WUSB1.0[7.5.2], bmAttributes description
+ */
+enum {
+ WUIE_HI_CAP_RECONNECT = 0,
+ WUIE_HI_CAP_LIMITED,
+ WUIE_HI_CAP_RESERVED,
+ WUIE_HI_CAP_ALL,
+};
+
+/**
+ * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
+ *
+ * Tells devices the host is going to stop sending MMCs and will disappear.
+ */
+struct wuie_channel_stop {
+ struct wuie_hdr hdr;
+ u8 attributes;
+ u8 timestamp[3];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Keepalive (WUSB1.0[7.5.9])
+ *
+ * Ask device(s) to send keepalives.
+ */
+struct wuie_keep_alive {
+ struct wuie_hdr hdr;
+ u8 bDeviceAddress[WUIE_ELT_MAX];
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Reset device (WUSB1.0[7.5.11])
+ *
+ * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
+ * use it for one at a time...
+ *
+ * In any case, this request is a wee bit silly: why don't they target
+ * by address??
+ */
+struct wuie_reset {
+ struct wuie_hdr hdr;
+ struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
+ *
+ * Tell device to disconnect; we can fit 4 addresses, but we only use
+ * it for one at a time...
+ */
+struct wuie_disconnect {
+ struct wuie_hdr hdr;
+ u8 bDeviceAddress;
+ u8 padding;
+} __attribute__((packed));
+
+/**
+ * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
+ *
+ * Tells all connected devices to disconnect.
+ */
+struct wuie_host_disconnect {
+ struct wuie_hdr hdr;
+} __attribute__((packed));
+
+/**
+ * WUSB Device Notification header (WUSB1.0[7.6])
+ */
+struct wusb_dn_hdr {
+ u8 bType;
+ u8 notifdata[];
+} __attribute__((packed));
+
+/** Device Notification codes (WUSB1.0[Table 7-54]) */
+enum WUSB_DN {
+ WUSB_DN_CONNECT = 0x01,
+ WUSB_DN_DISCONNECT = 0x02,
+ WUSB_DN_EPRDY = 0x03,
+ WUSB_DN_MASAVAILCHANGED = 0x04,
+ WUSB_DN_RWAKE = 0x05,
+ WUSB_DN_SLEEP = 0x06,
+ WUSB_DN_ALIVE = 0x07,
+};
+
+/** WUSB Device Notification Connect */
+struct wusb_dn_connect {
+ struct wusb_dn_hdr hdr;
+ __le16 attributes;
+ struct wusb_ckhdid CDID;
+} __attribute__((packed));
+
+static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
+{
+ return le16_to_cpu(dn->attributes) & 0xff;
+}
+
+static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
+{
+ return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
+}
+
+static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
+{
+ return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
+}
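+
+/*
+ * Note (summary of the accessors above): the Connect DN attributes
+ * field packs the previous device address in bits 7:0, the New
+ * Connection flag in bit 8 and the beacon behavior in bits 10:9.
+ */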
+
+/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
+struct wusb_dn_alive {
+ struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/** Device is disconnecting (WUSB1.0[7.6.2]) */
+struct wusb_dn_disconnect {
+ struct wusb_dn_hdr hdr;
+} __attribute__((packed));
+
+/* General constants */
+enum {
+ WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
+};
+
+/*
+ * WUSB Crypto stuff (WUSB1.0[6])
+ */
+
+extern const char *wusb_et_name(u8);
+
+/**
+ * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
+ * the host or the device.
+ */
+static inline u8 wusb_key_index(int index, int type, int originator)
+{
+ return (originator << 6) | (type << 4) | index;
+}
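+
+/*
+ * For illustration (example values, not from the spec text):
+ * wusb_key_index(0, WUSB_KEY_INDEX_TYPE_GTK,
+ * WUSB_KEY_INDEX_ORIGINATOR_HOST) yields 0x20 -- the originator goes
+ * in bit 6, the key type in bits 5:4 and the key index in bits 3:0.
+ */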
+
+#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */
+#define WUSB_KEY_INDEX_TYPE_ASSOC 1
+#define WUSB_KEY_INDEX_TYPE_GTK 2
+#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
+#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
+/* bits 0-3 used for the key index. */
+#define WUSB_KEY_INDEX_MAX 15
+
+/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
+struct aes_ccm_nonce {
+ u8 sfn[6]; /* Little Endian */
+ u8 tkid[3]; /* LE */
+ struct uwb_dev_addr dest_addr;
+ struct uwb_dev_addr src_addr;
+} __attribute__((packed));
+
+/* A CCM operation label, defined on WUSB1.0[6.5.x] */
+struct aes_ccm_label {
+ u8 data[14];
+} __attribute__((packed));
+
+/*
+ * Input to the key derivation sequence defined in
+ * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
+ * PRF function.
+ */
+struct wusb_keydvt_in {
+ u8 hnonce[16];
+ u8 dnonce[16];
+} __attribute__((packed));
+
+/*
+ * Output from the key derivation sequence defined in
+ * WUSB1.0[6.5.1].
+ */
+struct wusb_keydvt_out {
+ u8 kck[16];
+ u8 ptk[16];
+} __attribute__((packed));
+
+/* Pseudo Random Function WUSB1.0[6.5] */
+extern int wusb_crypto_init(void);
+extern void wusb_crypto_exit(void);
+extern ssize_t wusb_prf(void *out, size_t out_size,
+ const u8 key[16], const struct aes_ccm_nonce *_n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen, size_t len);
+
+static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 64);
+}
+
+static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 128);
+}
+
+static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct aes_ccm_label *a,
+ const void *b, size_t blen)
+{
+ return wusb_prf(out, out_size, key, n, a, b, blen, 256);
+}
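+
+/*
+ * Note: the 64/128/256 suffix is the requested PRF output size in
+ * bits (8, 16 and 32 bytes respectively); e.g. wusb_prf_256() below
+ * fills the 32 byte struct wusb_keydvt_out, while wusb_prf_64()
+ * produces the 8 byte out-of-band MIC.
+ */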
+
+/* Key derivation WUSB1.0[6.5.1] */
+static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
+ const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct wusb_keydvt_in *keydvt_in)
+{
+ const struct aes_ccm_label a = { .data = "Pair-wise keys" };
+ return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
+ keydvt_in, sizeof(*keydvt_in));
+}
+
+/*
+ * Out-of-band MIC Generation WUSB1.0[6.5.2]
+ *
+ * Compute the MIC over @key, @n and @hs and place it in @mic_out.
+ *
+ * @mic_out: Where to place the 8 byte MIC tag
+ * @key: KCK from the derivation process
+ * @n: CCM nonce, n->sfn == 0, TKID as established in the
+ * process.
+ * @hs: Handshake struct for phase 2 of the 4-way.
+ * hs->bStatus and hs->bReserved are zero.
+ * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2])
+ * hs->dest_addr is the device's USB address padded with 0
+ * hs->src_addr is the host's UWB device address
+ * hs->mic is ignored (as we compute that value).
+ */
+static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
+ const struct aes_ccm_nonce *n,
+ const struct usb_handshake *hs)
+{
+ const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
+ return wusb_prf_64(mic_out, 8, key, n, &a,
+ hs, sizeof(*hs) - sizeof(hs->MIC));
+}
+
+#endif /* #ifndef __WUSB_H__ */
diff --git a/drivers/staging/wusbcore/mmc.c b/drivers/staging/wusbcore/mmc.c
new file mode 100644
index 000000000000..881e1f20d718
--- /dev/null
+++ b/drivers/staging/wusbcore/mmc.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * MMC (Microscheduled Management Command) handling
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * WUIEs and MMC IEs...well, they are almost the same in the end. MMC
+ * IEs are Wireless USB IEs that go into the MMC period...[what is
+ * that? look in Design-overview.txt].
+ *
+ *
+ * This is a simple subsystem to keep track of which IEs are being
+ * sent by the host in the MMC period.
+ *
+ * For each WUIE we ask to send, we keep it in an array, so we can
+ * request its removal later, or replace the content. They are tracked
+ * by pointer, so be sure to use the same pointer if you want to
+ * remove it or update the contents.
+ *
+ * FIXME:
+ * - add timers that autoremove intervalled IEs?
+ */
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "include/wusb.h"
+#include "wusbhc.h"
+
+/* Initialize the MMCIEs handling mechanism */
+int wusbhc_mmcie_create(struct wusbhc *wusbhc)
+{
+ u8 mmcies = wusbhc->mmcies_max;
+ wusbhc->mmcie = kcalloc(mmcies, sizeof(wusbhc->mmcie[0]), GFP_KERNEL);
+ if (wusbhc->mmcie == NULL)
+ return -ENOMEM;
+ mutex_init(&wusbhc->mmcie_mutex);
+ return 0;
+}
+
+/* Release resources used by the MMCIEs handling mechanism */
+void wusbhc_mmcie_destroy(struct wusbhc *wusbhc)
+{
+ kfree(wusbhc->mmcie);
+}
+
+/*
+ * Add or replace an MMC Wireless USB IE.
+ *
+ * @interval: See WUSB1.0[8.5.3.1]
+ * @repeat_cnt: See WUSB1.0[8.5.3.1]
+ * @handle: See WUSB1.0[8.5.3.1]
+ * @wuie: Pointer to the header of the WUSB IE data to add.
+ * MUST BE allocated in a kmalloc buffer (no stack or
+ * vmalloc).
+ * THE CALLER ALWAYS OWNS THE POINTER (we don't free it
+ * on remove, we just forget about it).
+ * @returns: 0 if ok, < 0 errno code on error.
+ *
+ * Goes over the *whole* @wusbhc->mmcie array looking for (a) a free
+ * spot and (b) if @wuie is already in the array (aka: transmitted in
+ * the MMCs), the spot where it is.
+ *
+ * If present, we "overwrite it" (update).
+ *
+ *
+ * NOTE: Need special ordering rules -- see below WUSB1.0 Table 7-38.
+ * The host uses the handle as the 'sort' index. We
+ * allocate the last one always for the WUIE_ID_HOST_INFO, and
+ * the rest, first come first serve in inverse order.
+ *
+ * Host software must make sure that it adds the other IEs in
+ * the right order... the host hardware is responsible for
+ * placing the WCTA IEs in the right place with the other IEs
+ * set by host software.
+ *
+ * NOTE: we can access wusbhc->wa_descr without locking because it is
+ * read only.
+ */
+int wusbhc_mmcie_set(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ struct wuie_hdr *wuie)
+{
+ int result = -ENOBUFS;
+ unsigned handle, itr;
+
+ /* Search a handle, taking into account the ordering */
+ mutex_lock(&wusbhc->mmcie_mutex);
+ switch (wuie->bIEIdentifier) {
+ case WUIE_ID_HOST_INFO:
+ /* Always last */
+ handle = wusbhc->mmcies_max - 1;
+ break;
+ case WUIE_ID_ISOCH_DISCARD:
+ dev_err(wusbhc->dev, "Special ordering case for WUIE ID 0x%x "
+ "unimplemented\n", wuie->bIEIdentifier);
+ result = -ENOSYS;
+ goto error_unlock;
+ default:
+ /* search for it or find the last empty slot */
+ handle = ~0;
+ for (itr = 0; itr < wusbhc->mmcies_max - 1; itr++) {
+ if (wusbhc->mmcie[itr] == wuie) {
+ handle = itr;
+ break;
+ }
+ if (wusbhc->mmcie[itr] == NULL)
+ handle = itr;
+ }
+ if (handle == ~0)
+ goto error_unlock;
+ }
+ result = (wusbhc->mmcie_add)(wusbhc, interval, repeat_cnt, handle,
+ wuie);
+ if (result >= 0)
+ wusbhc->mmcie[handle] = wuie;
+error_unlock:
+ mutex_unlock(&wusbhc->mmcie_mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_mmcie_set);
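+
+/*
+ * For illustration (hypothetical sizing): with mmcies_max == 4 the
+ * Host Info IE always occupies handle 3, while the other IEs end up
+ * in handles 0..2 as described in the ordering note above.
+ */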
+
+/*
+ * Remove an MMC IE previously added with wusbhc_mmcie_set()
+ *
+ * @wuie Pointer used to add the WUIE
+ */
+void wusbhc_mmcie_rm(struct wusbhc *wusbhc, struct wuie_hdr *wuie)
+{
+ int result;
+ unsigned handle, itr;
+
+ mutex_lock(&wusbhc->mmcie_mutex);
+ for (itr = 0; itr < wusbhc->mmcies_max; itr++) {
+ if (wusbhc->mmcie[itr] == wuie) {
+ handle = itr;
+ goto found;
+ }
+ }
+ mutex_unlock(&wusbhc->mmcie_mutex);
+ return;
+
+found:
+ result = (wusbhc->mmcie_rm)(wusbhc, handle);
+ if (result == 0)
+ wusbhc->mmcie[itr] = NULL;
+ mutex_unlock(&wusbhc->mmcie_mutex);
+}
+EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm);
+
+static int wusbhc_mmc_start(struct wusbhc *wusbhc)
+{
+ int ret;
+
+ mutex_lock(&wusbhc->mutex);
+ ret = wusbhc->start(wusbhc);
+ if (ret >= 0)
+ wusbhc->active = 1;
+ mutex_unlock(&wusbhc->mutex);
+
+ return ret;
+}
+
+static void wusbhc_mmc_stop(struct wusbhc *wusbhc)
+{
+ mutex_lock(&wusbhc->mutex);
+ wusbhc->active = 0;
+ wusbhc->stop(wusbhc, WUSB_CHANNEL_STOP_DELAY_MS);
+ mutex_unlock(&wusbhc->mutex);
+}
+
+/*
+ * wusbhc_start - start transmitting MMCs and accepting connections
+ * @wusbhc: the HC to start
+ *
+ * Establishes a cluster reservation, enables device connections, and
+ * starts MMCs with appropriate DNTS parameters.
+ */
+int wusbhc_start(struct wusbhc *wusbhc)
+{
+ int result;
+ struct device *dev = wusbhc->dev;
+
+ WARN_ON(wusbhc->wuie_host_info != NULL);
+ BUG_ON(wusbhc->uwb_rc == NULL);
+
+ result = wusbhc_rsv_establish(wusbhc);
+ if (result < 0) {
+ dev_err(dev, "cannot establish cluster reservation: %d\n",
+ result);
+ goto error_rsv_establish;
+ }
+
+ result = wusbhc_devconnect_start(wusbhc);
+ if (result < 0) {
+ dev_err(dev, "error enabling device connections: %d\n",
+ result);
+ goto error_devconnect_start;
+ }
+
+ result = wusbhc_sec_start(wusbhc);
+ if (result < 0) {
+ dev_err(dev, "error starting security in the HC: %d\n",
+ result);
+ goto error_sec_start;
+ }
+
+ result = wusbhc->set_num_dnts(wusbhc, wusbhc->dnts_interval,
+ wusbhc->dnts_num_slots);
+ if (result < 0) {
+ dev_err(dev, "Cannot set DNTS parameters: %d\n", result);
+ goto error_set_num_dnts;
+ }
+ result = wusbhc_mmc_start(wusbhc);
+ if (result < 0) {
+ dev_err(dev, "error starting wusbch: %d\n", result);
+ goto error_wusbhc_start;
+ }
+
+ return 0;
+
+error_wusbhc_start:
+ wusbhc_sec_stop(wusbhc);
+error_set_num_dnts:
+error_sec_start:
+ wusbhc_devconnect_stop(wusbhc);
+error_devconnect_start:
+ wusbhc_rsv_terminate(wusbhc);
+error_rsv_establish:
+ return result;
+}
+
+/*
+ * wusbhc_stop - stop transmitting MMCs
+ * @wusbhc: the HC to stop
+ *
+ * Stops the WUSB channel and removes the cluster reservation.
+ */
+void wusbhc_stop(struct wusbhc *wusbhc)
+{
+ wusbhc_mmc_stop(wusbhc);
+ wusbhc_sec_stop(wusbhc);
+ wusbhc_devconnect_stop(wusbhc);
+ wusbhc_rsv_terminate(wusbhc);
+}
+
+/*
+ * Set/reset/update a new CHID
+ *
+ * Depending on the previous state of the MMCs, start, stop or change
+ * the sent MMC. This effectively switches the host controller on and
+ * off (radio wise).
+ */
+int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
+{
+ int result = 0;
+
+ if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
+ chid = NULL;
+
+ mutex_lock(&wusbhc->mutex);
+ if (chid) {
+ if (wusbhc->active) {
+ mutex_unlock(&wusbhc->mutex);
+ return -EBUSY;
+ }
+ wusbhc->chid = *chid;
+ }
+
+ /* register with UWB if we haven't already since we are about to start
+ the radio. */
+ if ((chid) && (wusbhc->uwb_rc == NULL)) {
+ wusbhc->uwb_rc = uwb_rc_get_by_grandpa(wusbhc->dev->parent);
+ if (wusbhc->uwb_rc == NULL) {
+ result = -ENODEV;
+ dev_err(wusbhc->dev,
+ "Cannot get associated UWB Host Controller\n");
+ goto error_rc_get;
+ }
+
+ result = wusbhc_pal_register(wusbhc);
+ if (result < 0) {
+ dev_err(wusbhc->dev, "Cannot register as a UWB PAL\n");
+ goto error_pal_register;
+ }
+ }
+ mutex_unlock(&wusbhc->mutex);
+
+ if (chid)
+ result = uwb_radio_start(&wusbhc->pal);
+ else if (wusbhc->uwb_rc)
+ uwb_radio_stop(&wusbhc->pal);
+
+ return result;
+
+error_pal_register:
+ uwb_rc_put(wusbhc->uwb_rc);
+ wusbhc->uwb_rc = NULL;
+error_rc_get:
+ mutex_unlock(&wusbhc->mutex);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_chid_set);
diff --git a/drivers/staging/wusbcore/pal.c b/drivers/staging/wusbcore/pal.c
new file mode 100644
index 000000000000..30f569131471
--- /dev/null
+++ b/drivers/staging/wusbcore/pal.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB Host Controller
+ * UWB Protocol Adaptation Layer (PAL) glue.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ */
+#include "wusbhc.h"
+
+static void wusbhc_channel_changed(struct uwb_pal *pal, int channel)
+{
+ struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal);
+
+ dev_dbg(wusbhc->dev, "%s: channel = %d\n", __func__, channel);
+ if (channel < 0)
+ wusbhc_stop(wusbhc);
+ else
+ wusbhc_start(wusbhc);
+}
+
+/**
+ * wusbhc_pal_register - register the WUSB HC as a UWB PAL
+ * @wusbhc: the WUSB HC
+ */
+int wusbhc_pal_register(struct wusbhc *wusbhc)
+{
+ uwb_pal_init(&wusbhc->pal);
+
+ wusbhc->pal.name = "wusbhc";
+ wusbhc->pal.device = wusbhc->usb_hcd.self.controller;
+ wusbhc->pal.rc = wusbhc->uwb_rc;
+ wusbhc->pal.channel_changed = wusbhc_channel_changed;
+
+ return uwb_pal_register(&wusbhc->pal);
+}
+
+/**
+ * wusbhc_pal_unregister - unregister the WUSB HC as a UWB PAL
+ * @wusbhc: the WUSB HC
+ */
+void wusbhc_pal_unregister(struct wusbhc *wusbhc)
+{
+ if (wusbhc->uwb_rc)
+ uwb_pal_unregister(&wusbhc->pal);
+}
diff --git a/drivers/staging/wusbcore/reservation.c b/drivers/staging/wusbcore/reservation.c
new file mode 100644
index 000000000000..b921faac698b
--- /dev/null
+++ b/drivers/staging/wusbcore/reservation.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WUSB cluster reservation management
+ *
+ * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
+ */
+#include <linux/kernel.h>
+
+#include "../uwb/uwb.h"
+#include "wusbhc.h"
+
+/*
+ * WUSB cluster reservations are multicast reservations with the
+ * broadcast cluster ID (BCID) as the target DevAddr.
+ *
+ * FIXME: consider adjusting the reservation depending on what devices
+ * are attached.
+ */
+
+static int wusbhc_bwa_set(struct wusbhc *wusbhc, u8 stream,
+ const struct uwb_mas_bm *mas)
+{
+ if (mas == NULL)
+ mas = &uwb_mas_bm_zero;
+ return wusbhc->bwa_set(wusbhc, stream, mas);
+}
+
+/**
+ * wusbhc_rsv_complete_cb - WUSB HC reservation complete callback
+ * @rsv: the reservation
+ *
+ * Either set or clear the HC's view of the reservation.
+ *
+ * FIXME: when a reservation is denied the HC should be stopped.
+ */
+static void wusbhc_rsv_complete_cb(struct uwb_rsv *rsv)
+{
+ struct wusbhc *wusbhc = rsv->pal_priv;
+ struct device *dev = wusbhc->dev;
+ struct uwb_mas_bm mas;
+
+ dev_dbg(dev, "%s: state = %d\n", __func__, rsv->state);
+ switch (rsv->state) {
+ case UWB_RSV_STATE_O_ESTABLISHED:
+ uwb_rsv_get_usable_mas(rsv, &mas);
+ dev_dbg(dev, "established reservation: %*pb\n",
+ UWB_NUM_MAS, mas.bm);
+ wusbhc_bwa_set(wusbhc, rsv->stream, &mas);
+ break;
+ case UWB_RSV_STATE_NONE:
+ dev_dbg(dev, "removed reservation\n");
+ wusbhc_bwa_set(wusbhc, 0, NULL);
+ break;
+ default:
+ dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state);
+ break;
+ }
+}
+
+
+/**
+ * wusbhc_rsv_establish - establish a reservation for the cluster
+ * @wusbhc: the WUSB HC requesting a bandwidth reservation
+ */
+int wusbhc_rsv_establish(struct wusbhc *wusbhc)
+{
+ struct uwb_rc *rc = wusbhc->uwb_rc;
+ struct uwb_rsv *rsv;
+ struct uwb_dev_addr bcid;
+ int ret;
+
+ if (rc == NULL)
+ return -ENODEV;
+
+ rsv = uwb_rsv_create(rc, wusbhc_rsv_complete_cb, wusbhc);
+ if (rsv == NULL)
+ return -ENOMEM;
+
+ bcid.data[0] = wusbhc->cluster_id;
+ bcid.data[1] = 0;
+
+ rsv->target.type = UWB_RSV_TARGET_DEVADDR;
+ rsv->target.devaddr = bcid;
+ rsv->type = UWB_DRP_TYPE_PRIVATE;
+ rsv->max_mas = 256; /* try to get as much as possible */
+ rsv->min_mas = 15; /* one MAS per zone */
+ rsv->max_interval = 1; /* max latency is one zone */
+ rsv->is_multicast = true;
+
+ ret = uwb_rsv_establish(rsv);
+ if (ret == 0)
+ wusbhc->rsv = rsv;
+ else
+ uwb_rsv_destroy(rsv);
+ return ret;
+}
+
+
+/**
+ * wusbhc_rsv_terminate - terminate the cluster reservation
+ * @wusbhc: the WUSB host whose reservation is to be terminated
+ */
+void wusbhc_rsv_terminate(struct wusbhc *wusbhc)
+{
+ if (wusbhc->rsv) {
+ uwb_rsv_terminate(wusbhc->rsv);
+ uwb_rsv_destroy(wusbhc->rsv);
+ wusbhc->rsv = NULL;
+ }
+}
diff --git a/drivers/staging/wusbcore/rh.c b/drivers/staging/wusbcore/rh.c
new file mode 100644
index 000000000000..20c08cd9dcbf
--- /dev/null
+++ b/drivers/staging/wusbcore/rh.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB Host Controller
+ * Root Hub operations
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * We fake a root hub that has fake ports (as many as simultaneous
+ * devices the Wireless USB Host Controller can deal with). For each
+ * port we keep a state in @wusbhc->port[index] identical to the one
+ * specified in the USB2.0[ch11] spec and some extra device
+ * information that complements the one in 'struct usb_device' (as
+ * this lacks a hcpriv pointer).
+ *
+ * Note this is common to WHCI and HWA host controllers.
+ *
+ * Through here we enable most of the state changes that the USB stack
+ * will use to connect or disconnect devices. We need to do some
+ * forced adaptation of Wireless USB device states vs. wired:
+ *
+ * USB: WUSB:
+ *
+ * Port Powered-off port slot n/a
+ * Powered-on port slot available
+ * Disconnected port slot available
+ * Connected port slot assigned device
+ * device sent DN_Connect
+ * device was authenticated
+ * Enabled device is authenticated, transitioned
+ * from unauth -> auth -> default address
+ * -> enabled
+ * Reset disconnect
+ * Disable disconnect
+ *
+ * This maps the standard USB port states with the WUSB device states
+ * so we can fake ports without having to modify the USB stack.
+ *
+ * FIXME: this process will change in the future
+ *
+ *
+ * ENTRY POINTS
+ *
+ * Our entry points into here are, as in hcd.c, the USB stack root hub
+ * ops defined in the usb_hcd struct:
+ *
+ * wusbhc_rh_status_data() Provide hub and port status data bitmap
+ *
+ * wusbhc_rh_control() Execution of all the major requests
+ * you can do to a hub (Set|Clear
+ * features, get descriptors, status, etc).
+ *
+ * wusbhc_rh_[suspend|resume]() Suspend/resume the root hub
+ *
+ * wusbhc_rh_start_port_reset() ??? unimplemented
+ */
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "wusbhc.h"
+
+/*
+ * Reset a fake port
+ *
+ * Using a Reset Device IE is too heavyweight as it causes the device
+ * to enter the UnConnected state and leave the cluster, this can mean
+ * that when the device reconnects it is connected to a different fake
+ * port.
+ *
+ * Instead, reset authenticated devices with a SetAddress(0), followed
+ * by a SetAddress(AuthAddr).
+ *
+ * For unauthenticated devices just pretend to reset but do nothing.
+ * If the device initialization continues to fail it will eventually
+ * time out after TrustTimeout and enter the UnConnected state.
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Supposedly we are the only thread accessing @wusbhc->port; in any
+ * case, maybe we should move the mutex locking from
+ * wusbhc_devconnect_auth() to here.
+ *
+ * @port_idx refers to the wusbhc's port index, not the USB port number
+ */
+static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
+{
+ int result = 0;
+ struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
+ struct wusb_dev *wusb_dev = port->wusb_dev;
+
+ if (wusb_dev == NULL)
+ return -ENOTCONN;
+
+ port->status |= USB_PORT_STAT_RESET;
+ port->change |= USB_PORT_STAT_C_RESET;
+
+ if (wusb_dev->addr & WUSB_DEV_ADDR_UNAUTH)
+ result = 0;
+ else
+ result = wusb_dev_update_address(wusbhc, wusb_dev);
+
+ port->status &= ~USB_PORT_STAT_RESET;
+ port->status |= USB_PORT_STAT_ENABLE;
+ port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
+
+ return result;
+}
+
+/*
+ * Return the hub change status bitmap
+ *
+ * The bits in the change status bitmap are cleared when a
+ * ClearPortFeature request is issued (USB2.0[11.12.3, 11.12.4]).
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * WARNING!! This gets called from atomic context; we cannot get the
+ * mutex--the only race condition we can find is some bit
+ * changing just after we copy it, which shouldn't be too
+ * big of a problem [and we can't make it a spinlock
+ * because other parts need to take it and sleep].
+ *
+ * @usb_hcd is refcounted, so it won't disappear under us
+ * and before killing a host, the polling of the root hub
+ * would be stopped anyway.
+ */
+int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ size_t cnt, size, bits_set = 0;
+
+ /* WE DON'T LOCK, see comment */
+ /* round up to bytes. Hub bit is bit 0 so add 1. */
+ size = DIV_ROUND_UP(wusbhc->ports_max + 1, 8);
+
+ /* clear the output buffer. */
+ memset(_buf, 0, size);
+ /* set the bit for each changed port. */
+ for (cnt = 0; cnt < wusbhc->ports_max; cnt++) {
+
+ if (wusb_port_by_idx(wusbhc, cnt)->change) {
+ const int bitpos = cnt+1;
+
+ _buf[bitpos/8] |= (1 << (bitpos % 8));
+ bits_set++;
+ }
+ }
+
+ return bits_set ? size : 0;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
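+
+/*
+ * For illustration (hypothetical numbers): with ports_max == 4 the
+ * bitmap is one byte; a pending change on fake port index 2 sets
+ * bit 3 (port bits start at 1, bit 0 being the hub itself), so the
+ * function returns 1 with _buf[0] == 0x08.
+ */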
+
+/*
+ * Return the hub's descriptor
+ *
+ * NOTE: almost cut and paste from ehci-hub.c
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked
+ */
+static int wusbhc_rh_get_hub_descr(struct wusbhc *wusbhc, u16 wValue,
+ u16 wIndex,
+ struct usb_hub_descriptor *descr,
+ u16 wLength)
+{
+ u16 temp = 1 + (wusbhc->ports_max / 8);
+ u8 length = 7 + 2 * temp;
+
+ if (wLength < length)
+ return -ENOSPC;
+ descr->bDescLength = 7 + 2 * temp;
+ descr->bDescriptorType = USB_DT_HUB; /* HUB type */
+ descr->bNbrPorts = wusbhc->ports_max;
+ descr->wHubCharacteristics = cpu_to_le16(
+ HUB_CHAR_COMMON_LPSM /* All ports power at once */
+ | 0x00 /* not part of compound device */
+ | HUB_CHAR_NO_OCPM /* No overcurrent protection */
+ | 0x00 /* 8 FS think time FIXME ?? */
+ | 0x00); /* No port indicators */
+ descr->bPwrOn2PwrGood = 0;
+ descr->bHubContrCurrent = 0;
+ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&descr->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&descr->u.hs.DeviceRemovable[temp], 0xff, temp);
+ return 0;
+}
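+
+/*
+ * For illustration (hypothetical numbers): with ports_max == 4,
+ * temp == 1 and bDescLength == 9 -- a one byte DeviceRemovable bitmap
+ * of zeroes followed by a one byte PortPwrCtrlMask of 0xff.
+ */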
+
+/*
+ * Clear a hub feature
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Nothing to do, so no locking needed ;)
+ */
+static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature)
+{
+ int result;
+
+ switch (feature) {
+ case C_HUB_LOCAL_POWER:
+ /* FIXME: maybe plug bit 0 to the power input status,
+ * if any?
+ * see wusbhc_rh_get_hub_status() */
+ case C_HUB_OVER_CURRENT:
+ result = 0;
+ break;
+ default:
+ result = -EPIPE;
+ }
+ return result;
+}
+
+/*
+ * Return hub status (it is always zero...)
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ *
+ * Nothing to do, so no locking needed ;)
+ */
+static int wusbhc_rh_get_hub_status(struct wusbhc *wusbhc, u32 *buf,
+ u16 wLength)
+{
+ /* FIXME: maybe plug bit 0 to the power input status (if any)? */
+ *buf = 0;
+ return 0;
+}
+
+/*
+ * Set a port feature
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature,
+ u8 selector, u8 port_idx)
+{
+ struct device *dev = wusbhc->dev;
+
+ if (port_idx > wusbhc->ports_max)
+ return -EINVAL;
+
+ switch (feature) {
+ /* According to USB2.0[11.24.2.13]p2, these features
+ * are not required to be implemented. */
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ return 0;
+ case USB_PORT_FEAT_POWER:
+ /* No such thing, but we fake it works */
+ mutex_lock(&wusbhc->mutex);
+ wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER;
+ mutex_unlock(&wusbhc->mutex);
+ return 0;
+ case USB_PORT_FEAT_RESET:
+ return wusbhc_rh_port_reset(wusbhc, port_idx);
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_SUSPEND:
+ dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n",
+ port_idx, feature, selector);
+ return -ENOSYS;
+ default:
+ dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n",
+ port_idx, feature, selector);
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+/*
+ * Clear a port feature...
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature,
+ u8 selector, u8 port_idx)
+{
+ int result = 0;
+ struct device *dev = wusbhc->dev;
+
+ if (port_idx > wusbhc->ports_max)
+ return -EINVAL;
+
+ mutex_lock(&wusbhc->mutex);
+ switch (feature) {
+ case USB_PORT_FEAT_POWER: /* fake port always on */
+ /* According to USB2.0[11.24.2.7.1.4], no need to implement? */
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_RESET;
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_CONNECTION;
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ __wusbhc_dev_disable(wusbhc, port_idx);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ wusb_port_by_idx(wusbhc, port_idx)->change &= ~USB_PORT_STAT_C_ENABLE;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ case USB_PORT_FEAT_C_SUSPEND:
+ dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n",
+ port_idx, feature, selector);
+ result = -ENOSYS;
+ break;
+ default:
+ dev_err(dev, "(port_idx %d) Clear feat %d/%d UNKNOWN\n",
+ port_idx, feature, selector);
+ result = -EPIPE;
+ break;
+ }
+ mutex_unlock(&wusbhc->mutex);
+
+ return result;
+}
+
+/*
+ * Return the port's status
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx,
+ u32 *_buf, u16 wLength)
+{
+ __le16 *buf = (__le16 *)_buf;
+
+ if (port_idx > wusbhc->ports_max)
+ return -EINVAL;
+
+ mutex_lock(&wusbhc->mutex);
+ buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status);
+ buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change);
+ mutex_unlock(&wusbhc->mutex);
+
+ return 0;
+}
+
+/*
+ * Entry point for Root Hub operations
+ *
+ * @wusbhc is assumed referenced and @wusbhc->mutex unlocked.
+ */
+int wusbhc_rh_control(struct usb_hcd *usb_hcd, u16 reqntype, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ int result = -ENOSYS;
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+
+ switch (reqntype) {
+ case GetHubDescriptor:
+ result = wusbhc_rh_get_hub_descr(
+ wusbhc, wValue, wIndex,
+ (struct usb_hub_descriptor *) buf, wLength);
+ break;
+ case ClearHubFeature:
+ result = wusbhc_rh_clear_hub_feat(wusbhc, wValue);
+ break;
+ case GetHubStatus:
+ result = wusbhc_rh_get_hub_status(wusbhc, (u32 *)buf, wLength);
+ break;
+
+ case SetPortFeature:
+ result = wusbhc_rh_set_port_feat(wusbhc, wValue, wIndex >> 8,
+ (wIndex & 0xff) - 1);
+ break;
+ case ClearPortFeature:
+ result = wusbhc_rh_clear_port_feat(wusbhc, wValue, wIndex >> 8,
+ (wIndex & 0xff) - 1);
+ break;
+ case GetPortStatus:
+ result = wusbhc_rh_get_port_status(wusbhc, wIndex - 1,
+ (u32 *)buf, wLength);
+ break;
+
+ case SetHubFeature:
+ default:
+ dev_err(wusbhc->dev, "%s (%p [%p], %x, %x, %x, %p, %x) "
+ "UNIMPLEMENTED\n", __func__, usb_hcd, wusbhc, reqntype,
+ wValue, wIndex, buf, wLength);
+ /* dump_stack(); */
+ result = -ENOSYS;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_control);
+
+int wusbhc_rh_start_port_reset(struct usb_hcd *usb_hcd, unsigned port_idx)
+{
+ struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+ dev_err(wusbhc->dev, "%s (%p [%p], port_idx %u) UNIMPLEMENTED\n",
+ __func__, usb_hcd, wusbhc, port_idx);
+ WARN_ON(1);
+ return -ENOSYS;
+}
+EXPORT_SYMBOL_GPL(wusbhc_rh_start_port_reset);
+
+static void wusb_port_init(struct wusb_port *port)
+{
+ port->status |= USB_PORT_STAT_HIGH_SPEED;
+}
+
+/*
+ * Alloc fake port specific fields and status.
+ */
+int wusbhc_rh_create(struct wusbhc *wusbhc)
+{
+ int result = -ENOMEM;
+ size_t port_size, itr;
+ port_size = wusbhc->ports_max * sizeof(wusbhc->port[0]);
+ wusbhc->port = kzalloc(port_size, GFP_KERNEL);
+ if (wusbhc->port == NULL)
+ goto error_port_alloc;
+ for (itr = 0; itr < wusbhc->ports_max; itr++)
+ wusb_port_init(&wusbhc->port[itr]);
+ result = 0;
+error_port_alloc:
+ return result;
+}
+
+void wusbhc_rh_destroy(struct wusbhc *wusbhc)
+{
+ kfree(wusbhc->port);
+}
diff --git a/drivers/staging/wusbcore/security.c b/drivers/staging/wusbcore/security.c
new file mode 100644
index 000000000000..14ac8c98ac9e
--- /dev/null
+++ b/drivers/staging/wusbcore/security.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB Host Controller
+ * Security support: encryption enablement, etc
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * FIXME: docs
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/usb/ch9.h>
+#include <linux/random.h>
+#include <linux/export.h>
+#include "wusbhc.h"
+#include <asm/unaligned.h>
+
+static void wusbhc_gtk_rekey_work(struct work_struct *work);
+
+int wusbhc_sec_create(struct wusbhc *wusbhc)
+{
+ /*
+ * WQ is singlethread because we need to serialize rekey operations.
+ * Use a separate workqueue for security operations instead of the
+ * wusbd workqueue because security operations may need to communicate
+ * directly with downstream wireless devices using synchronous URBs.
+ * If a device is not responding, this could block other host
+ * controller operations.
+ */
+ wusbhc->wq_security = create_singlethread_workqueue("wusbd_security");
+ if (wusbhc->wq_security == NULL) {
+ pr_err("WUSB-core: Cannot create wusbd_security workqueue\n");
+ return -ENOMEM;
+ }
+
+ wusbhc->gtk.descr.bLength = sizeof(wusbhc->gtk.descr) +
+ sizeof(wusbhc->gtk.data);
+ wusbhc->gtk.descr.bDescriptorType = USB_DT_KEY;
+ wusbhc->gtk.descr.bReserved = 0;
+ wusbhc->gtk_index = 0;
+
+ INIT_WORK(&wusbhc->gtk_rekey_work, wusbhc_gtk_rekey_work);
+
+ return 0;
+}
+
+
+/* Called when the HC is destroyed */
+void wusbhc_sec_destroy(struct wusbhc *wusbhc)
+{
+ destroy_workqueue(wusbhc->wq_security);
+}
+
+
+/**
+ * wusbhc_next_tkid - generate a new, currently unused, TKID
+ * @wusbhc: the WUSB host controller
+ * @wusb_dev: the device whose PTK the TKID is for
+ * (or NULL for a TKID for a GTK)
+ *
+ * The generated TKID consists of two parts: the device's authenticated
+ * address (or 0 for a GTK) and an incrementing number. This ensures
+ * that TKIDs cannot be shared between devices, and by the time the
+ * incrementing number wraps around the older TKIDs will no longer be
+ * in use (a maximum of two keys may be active at any one time).
+ */
+static u32 wusbhc_next_tkid(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ u32 *tkid;
+ u32 addr;
+
+ if (wusb_dev == NULL) {
+ tkid = &wusbhc->gtk_tkid;
+ addr = 0;
+ } else {
+ tkid = &wusb_port_by_idx(wusbhc, wusb_dev->port_idx)->ptk_tkid;
+ addr = wusb_dev->addr & 0x7f;
+ }
+
+ *tkid = (addr << 8) | ((*tkid + 1) & 0xff);
+
+ return *tkid;
+}
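+
+/*
+ * For illustration (example address): a device authenticated at
+ * address 0x12 gets TKIDs 0x1201, 0x1202, ... with only the low byte
+ * wrapping, so TKIDs of different devices (and of the GTK, which uses
+ * address 0) stay disjoint.
+ */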
+
+static void wusbhc_generate_gtk(struct wusbhc *wusbhc)
+{
+ const size_t key_size = sizeof(wusbhc->gtk.data);
+ u32 tkid;
+
+ tkid = wusbhc_next_tkid(wusbhc, NULL);
+
+ wusbhc->gtk.descr.tTKID[0] = (tkid >> 0) & 0xff;
+ wusbhc->gtk.descr.tTKID[1] = (tkid >> 8) & 0xff;
+ wusbhc->gtk.descr.tTKID[2] = (tkid >> 16) & 0xff;
+
+ get_random_bytes(wusbhc->gtk.descr.bKeyData, key_size);
+}
+
+/**
+ * wusbhc_sec_start - start the security management process
+ * @wusbhc: the WUSB host controller
+ *
+ * Generate and set an initial GTK on the host controller.
+ *
+ * Called when the HC is started.
+ */
+int wusbhc_sec_start(struct wusbhc *wusbhc)
+{
+ const size_t key_size = sizeof(wusbhc->gtk.data);
+ int result;
+
+ wusbhc_generate_gtk(wusbhc);
+
+ result = wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
+ &wusbhc->gtk.descr.bKeyData, key_size);
+ if (result < 0)
+ dev_err(wusbhc->dev, "cannot set GTK for the host: %d\n",
+ result);
+
+ return result;
+}
+
+/**
+ * wusbhc_sec_stop - stop the security management process
+ * @wusbhc: the WUSB host controller
+ *
+ * Wait for any pending GTK rekeys to stop.
+ */
+void wusbhc_sec_stop(struct wusbhc *wusbhc)
+{
+ cancel_work_sync(&wusbhc->gtk_rekey_work);
+}
+
+
+/** @returns encryption type name */
+const char *wusb_et_name(u8 x)
+{
+ switch (x) {
+ case USB_ENC_TYPE_UNSECURE: return "unsecure";
+ case USB_ENC_TYPE_WIRED: return "wired";
+ case USB_ENC_TYPE_CCM_1: return "CCM-1";
+ case USB_ENC_TYPE_RSA_1: return "RSA-1";
+ default: return "unknown";
+ }
+}
+EXPORT_SYMBOL_GPL(wusb_et_name);
+
+/*
+ * Set the device encryption method
+ *
+ * We tell the device which encryption method to use; we do this when
+ * setting up the device's security.
+ */
+static int wusb_dev_set_encryption(struct usb_device *usb_dev, int value)
+{
+ int result;
+ struct device *dev = &usb_dev->dev;
+ struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
+
+ if (value) {
+ value = wusb_dev->ccm1_etd.bEncryptionValue;
+ } else {
+ /* FIXME: should be wusb_dev->etd[UNSECURE].bEncryptionValue */
+ value = 0;
+ }
+ /* Set device's */
+ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_ENCRYPTION,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ value, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (result < 0)
+ dev_err(dev, "Can't set device's WUSB encryption to "
+ "%s (value %d): %d\n",
+ wusb_et_name(wusb_dev->ccm1_etd.bEncryptionType),
+ wusb_dev->ccm1_etd.bEncryptionValue, result);
+ return result;
+}
+
+/*
+ * Set the GTK to be used by a device.
+ *
+ * The device must be authenticated.
+ */
+static int wusb_dev_set_gtk(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ struct usb_device *usb_dev = wusb_dev->usb_dev;
+ u8 key_index = wusb_key_index(wusbhc->gtk_index,
+ WUSB_KEY_INDEX_TYPE_GTK, WUSB_KEY_INDEX_ORIGINATOR_HOST);
+
+ return usb_control_msg(
+ usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_DESCRIPTOR,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ USB_DT_KEY << 8 | key_index, 0,
+ &wusbhc->gtk.descr, wusbhc->gtk.descr.bLength,
+ USB_CTRL_SET_TIMEOUT);
+}
+
+
+/* FIXME: prototype for adding security */
+int wusb_dev_sec_add(struct wusbhc *wusbhc,
+ struct usb_device *usb_dev, struct wusb_dev *wusb_dev)
+{
+ int result, bytes, secd_size;
+ struct device *dev = &usb_dev->dev;
+ struct usb_security_descriptor *secd, *new_secd;
+ const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL;
+ const void *itr, *top;
+ char buf[64];
+
+ secd = kmalloc(sizeof(*secd), GFP_KERNEL);
+ if (secd == NULL) {
+ result = -ENOMEM;
+ goto out;
+ }
+
+ result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
+ 0, secd, sizeof(*secd));
+ if (result < (int)sizeof(*secd)) {
+ dev_err(dev, "Can't read security descriptor or "
+ "not enough data: %d\n", result);
+ goto out;
+ }
+ secd_size = le16_to_cpu(secd->wTotalLength);
+ new_secd = krealloc(secd, secd_size, GFP_KERNEL);
+ if (new_secd == NULL) {
+ dev_err(dev,
+ "Can't allocate space for security descriptors\n");
+ result = -ENOMEM;
+ goto out;
+ }
+ secd = new_secd;
+ result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
+ 0, secd, secd_size);
+ if (result < secd_size) {
+ dev_err(dev, "Can't read security descriptor or "
+ "not enough data: %d\n", result);
+ goto out;
+ }
+ bytes = 0;
+ itr = &secd[1];
+ top = (void *)secd + result;
+ while (itr < top) {
+ etd = itr;
+ if (top - itr < sizeof(*etd)) {
+ dev_err(dev, "BUG: bad device security descriptor; "
+ "not enough data (%zu vs %zu bytes left)\n",
+ top - itr, sizeof(*etd));
+ break;
+ }
+ if (etd->bLength < sizeof(*etd)) {
+ dev_err(dev, "BUG: bad device encryption descriptor; "
+ "descriptor is too short "
+ "(%u vs %zu needed)\n",
+ etd->bLength, sizeof(*etd));
+ break;
+ }
+ itr += etd->bLength;
+ bytes += snprintf(buf + bytes, sizeof(buf) - bytes,
+ "%s (0x%02x/%02x) ",
+ wusb_et_name(etd->bEncryptionType),
+ etd->bEncryptionValue, etd->bAuthKeyIndex);
+ if (etd->bEncryptionType == USB_ENC_TYPE_CCM_1)
+ ccm1_etd = etd;
+ }
+ /* This code only supports CCM1 as of now. */
+ /* FIXME: user has to choose which sec mode to use?
+ * In theory we want CCM */
+ if (ccm1_etd == NULL) {
+ dev_err(dev, "WUSB device doesn't support CCM1 encryption, "
+ "can't use!\n");
+ result = -EINVAL;
+ goto out;
+ }
+ wusb_dev->ccm1_etd = *ccm1_etd;
+ dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n",
+ buf, wusb_et_name(ccm1_etd->bEncryptionType),
+ ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex);
+ result = 0;
+out:
+ kfree(secd);
+ return result;
+}
+
+void wusb_dev_sec_rm(struct wusb_dev *wusb_dev)
+{
+ /* Nothing so far */
+}
+
+/**
+ * Update the address of an unauthenticated WUSB device
+ *
+ * Once we have successfully authenticated, we take it to addr0 state
+ * and then to a normal address.
+ *
+ * Before, the device's address (as known by it) was usb_dev->devnum |
+ * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum.
+ */
+int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
+{
+ int result = -ENOMEM;
+ struct usb_device *usb_dev = wusb_dev->usb_dev;
+ struct device *dev = &usb_dev->dev;
+ u8 new_address = wusb_dev->addr & 0x7F;
+
+ /* Set address 0 */
+ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_ADDRESS,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "auth failed: can't set address 0: %d\n",
+ result);
+ goto error_addr0;
+ }
+ result = wusb_set_dev_addr(wusbhc, wusb_dev, 0);
+ if (result < 0)
+ goto error_addr0;
+ usb_set_device_state(usb_dev, USB_STATE_DEFAULT);
+ usb_ep0_reinit(usb_dev);
+
+ /* Set new (authenticated) address. */
+ result = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_ADDRESS,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ new_address, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "auth failed: can't set address %u: %d\n",
+ new_address, result);
+ goto error_addr;
+ }
+ result = wusb_set_dev_addr(wusbhc, wusb_dev, new_address);
+ if (result < 0)
+ goto error_addr;
+ usb_set_device_state(usb_dev, USB_STATE_ADDRESS);
+ usb_ep0_reinit(usb_dev);
+ usb_dev->authenticated = 1;
+error_addr:
+error_addr0:
+ return result;
+}
+
+/*
+ * Perform the WUSB 4-way handshake with a device: derive and install
+ * the PTK for the device, then hand it the current GTK.
+ */
+/* FIXME: split and cleanup */
+int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
+ struct wusb_ckhdid *ck)
+{
+ int result = -ENOMEM;
+ struct usb_device *usb_dev = wusb_dev->usb_dev;
+ struct device *dev = &usb_dev->dev;
+ u32 tkid;
+ struct usb_handshake *hs;
+ struct aes_ccm_nonce ccm_n;
+ u8 mic[8];
+ struct wusb_keydvt_in keydvt_in;
+ struct wusb_keydvt_out keydvt_out;
+
+ hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
+ if (!hs)
+ goto error_kzalloc;
+
+ /* We need to turn on encryption before beginning the 4-way
+ * handshake (WUSB1.0[.3.2.2]) */
+ result = wusb_dev_set_encryption(usb_dev, 1);
+ if (result < 0)
+ goto error_dev_set_encryption;
+
+ tkid = wusbhc_next_tkid(wusbhc, wusb_dev);
+
+ hs[0].bMessageNumber = 1;
+ hs[0].bStatus = 0;
+ put_unaligned_le32(tkid, hs[0].tTKID);
+ hs[0].bReserved = 0;
+ memcpy(hs[0].CDID, &wusb_dev->cdid, sizeof(hs[0].CDID));
+ get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce));
+ memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */
+
+ result = usb_control_msg(
+ usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_HANDSHAKE,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 1, 0, &hs[0], sizeof(hs[0]), USB_CTRL_SET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "Handshake1: request failed: %d\n", result);
+ goto error_hs1;
+ }
+
+ /* Handshake 2, from the device -- need to verify fields */
+ result = usb_control_msg(
+ usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_GET_HANDSHAKE,
+ USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 2, 0, &hs[1], sizeof(hs[1]), USB_CTRL_GET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "Handshake2: request failed: %d\n", result);
+ goto error_hs2;
+ }
+
+ result = -EINVAL;
+ if (hs[1].bMessageNumber != 2) {
+ dev_err(dev, "Handshake2 failed: bad message number %u\n",
+ hs[1].bMessageNumber);
+ goto error_hs2;
+ }
+ if (hs[1].bStatus != 0) {
+ dev_err(dev, "Handshake2 failed: bad status %u\n",
+ hs[1].bStatus);
+ goto error_hs2;
+ }
+ if (memcmp(hs[0].tTKID, hs[1].tTKID, sizeof(hs[0].tTKID))) {
+ dev_err(dev, "Handshake2 failed: TKID mismatch "
+ "(#1 0x%02x%02x%02x vs #2 0x%02x%02x%02x)\n",
+ hs[0].tTKID[0], hs[0].tTKID[1], hs[0].tTKID[2],
+ hs[1].tTKID[0], hs[1].tTKID[1], hs[1].tTKID[2]);
+ goto error_hs2;
+ }
+ if (memcmp(hs[0].CDID, hs[1].CDID, sizeof(hs[0].CDID))) {
+ dev_err(dev, "Handshake2 failed: CDID mismatch\n");
+ goto error_hs2;
+ }
+
+ /* Setup the CCM nonce */
+ memset(&ccm_n.sfn, 0, sizeof(ccm_n.sfn)); /* Per WUSB1.0[6.5.2] */
+ put_unaligned_le32(tkid, ccm_n.tkid);
+ ccm_n.src_addr = wusbhc->uwb_rc->uwb_dev.dev_addr;
+ ccm_n.dest_addr.data[0] = wusb_dev->addr;
+ ccm_n.dest_addr.data[1] = 0;
+
+ /* Derive the KCK and PTK from CK, the CCM, H and D nonces */
+ memcpy(keydvt_in.hnonce, hs[0].nonce, sizeof(keydvt_in.hnonce));
+ memcpy(keydvt_in.dnonce, hs[1].nonce, sizeof(keydvt_in.dnonce));
+ result = wusb_key_derive(&keydvt_out, ck->data, &ccm_n, &keydvt_in);
+ if (result < 0) {
+ dev_err(dev, "Handshake2 failed: cannot derive keys: %d\n",
+ result);
+ goto error_hs2;
+ }
+
+ /* Compute MIC and verify it */
+ result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]);
+ if (result < 0) {
+ dev_err(dev, "Handshake2 failed: cannot compute MIC: %d\n",
+ result);
+ goto error_hs2;
+ }
+
+ if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) {
+ dev_err(dev, "Handshake2 failed: MIC mismatch\n");
+ goto error_hs2;
+ }
+
+ /* Send Handshake3 */
+ hs[2].bMessageNumber = 3;
+ hs[2].bStatus = 0;
+ put_unaligned_le32(tkid, hs[2].tTKID);
+ hs[2].bReserved = 0;
+ memcpy(hs[2].CDID, &wusb_dev->cdid, sizeof(hs[2].CDID));
+ memcpy(hs[2].nonce, hs[0].nonce, sizeof(hs[2].nonce));
+ result = wusb_oob_mic(hs[2].MIC, keydvt_out.kck, &ccm_n, &hs[2]);
+ if (result < 0) {
+ dev_err(dev, "Handshake3 failed: cannot compute MIC: %d\n",
+ result);
+ goto error_hs2;
+ }
+
+ result = usb_control_msg(
+ usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ USB_REQ_SET_HANDSHAKE,
+ USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
+ 3, 0, &hs[2], sizeof(hs[2]), USB_CTRL_SET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "Handshake3: request failed: %d\n", result);
+ goto error_hs3;
+ }
+
+ result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid,
+ keydvt_out.ptk, sizeof(keydvt_out.ptk));
+ if (result < 0)
+ goto error_wusbhc_set_ptk;
+
+ result = wusb_dev_set_gtk(wusbhc, wusb_dev);
+ if (result < 0) {
+ dev_err(dev, "Set GTK for device: request failed: %d\n",
+ result);
+ goto error_wusbhc_set_gtk;
+ }
+
+ /* Update the device's address from unauth to auth */
+ if (usb_dev->authenticated == 0) {
+ result = wusb_dev_update_address(wusbhc, wusb_dev);
+ if (result < 0)
+ goto error_dev_update_address;
+ }
+ result = 0;
+ dev_info(dev, "device authenticated\n");
+
+error_dev_update_address:
+error_wusbhc_set_gtk:
+error_wusbhc_set_ptk:
+error_hs3:
+error_hs2:
+error_hs1:
+ memset(hs, 0, 3*sizeof(hs[0]));
+ memzero_explicit(&keydvt_out, sizeof(keydvt_out));
+ memzero_explicit(&keydvt_in, sizeof(keydvt_in));
+ memzero_explicit(&ccm_n, sizeof(ccm_n));
+ memzero_explicit(mic, sizeof(mic));
+ if (result < 0)
+ wusb_dev_set_encryption(usb_dev, 0);
+error_dev_set_encryption:
+ kfree(hs);
+error_kzalloc:
+ return result;
+}
+
+/*
+ * Once all connected and authenticated devices have received the new
+ * GTK, switch the host to using it.
+ */
+static void wusbhc_gtk_rekey_work(struct work_struct *work)
+{
+ struct wusbhc *wusbhc = container_of(work,
+ struct wusbhc, gtk_rekey_work);
+ size_t key_size = sizeof(wusbhc->gtk.data);
+ int port_idx;
+ struct wusb_dev *wusb_dev, *wusb_dev_next;
+ LIST_HEAD(rekey_list);
+
+ mutex_lock(&wusbhc->mutex);
+ /* generate the new key */
+ wusbhc_generate_gtk(wusbhc);
+ /* roll the gtk index. */
+ wusbhc->gtk_index = (wusbhc->gtk_index + 1) % (WUSB_KEY_INDEX_MAX + 1);
+ /*
+ * Save all connected devices on a list while holding wusbhc->mutex and
+ * take a reference to each one. Then submit the set key request to
+ * them after releasing the lock in order to avoid a deadlock.
+ */
+ for (port_idx = 0; port_idx < wusbhc->ports_max; port_idx++) {
+ wusb_dev = wusbhc->port[port_idx].wusb_dev;
+ if (!wusb_dev || !wusb_dev->usb_dev
+ || !wusb_dev->usb_dev->authenticated)
+ continue;
+
+ wusb_dev_get(wusb_dev);
+ list_add_tail(&wusb_dev->rekey_node, &rekey_list);
+ }
+ mutex_unlock(&wusbhc->mutex);
+
+ /* Submit the rekey requests without holding wusbhc->mutex. */
+ list_for_each_entry_safe(wusb_dev, wusb_dev_next, &rekey_list,
+ rekey_node) {
+ list_del_init(&wusb_dev->rekey_node);
+ dev_dbg(&wusb_dev->usb_dev->dev,
+ "%s: rekey device at port %d\n",
+ __func__, wusb_dev->port_idx);
+
+ if (wusb_dev_set_gtk(wusbhc, wusb_dev) < 0) {
+ dev_err(&wusb_dev->usb_dev->dev,
+ "%s: rekey device at port %d failed\n",
+ __func__, wusb_dev->port_idx);
+ }
+ wusb_dev_put(wusb_dev);
+ }
+
+ /* Switch the host controller to use the new GTK. */
+ mutex_lock(&wusbhc->mutex);
+ wusbhc->set_gtk(wusbhc, wusbhc->gtk_tkid,
+ &wusbhc->gtk.descr.bKeyData, key_size);
+ mutex_unlock(&wusbhc->mutex);
+}
+
+/**
+ * wusbhc_gtk_rekey - generate and distribute a new GTK
+ * @wusbhc: the WUSB host controller
+ *
+ * Generate a new GTK and distribute it to all connected and
+ * authenticated devices. When all devices have the new GTK, the host
+ * starts using it.
+ *
+ * This must be called after every device disconnect (see [WUSB]
+ * section 6.2.11.2).
+ */
+void wusbhc_gtk_rekey(struct wusbhc *wusbhc)
+{
+ /*
+ * We need to submit a URB to the downstream WUSB devices in order to
+ * change the group key. This can't be done while holding the
+ * wusbhc->mutex since that is also taken in the urb_enqueue routine
+ * and will cause a deadlock. Instead, queue a work item to do
+ * it when the lock is not held.
+ */
+ queue_work(wusbhc->wq_security, &wusbhc->gtk_rekey_work);
+}
diff --git a/drivers/staging/wusbcore/wa-hc.c b/drivers/staging/wusbcore/wa-hc.c
new file mode 100644
index 000000000000..6827075fb8a1
--- /dev/null
+++ b/drivers/staging/wusbcore/wa-hc.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wire Adapter Host Controller Driver
+ * Common items to HWA and DWA based HCDs
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * FIXME: docs
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "wusbhc.h"
+#include "wa-hc.h"
+
+/**
+ * Assumes
+ *
+ * wa->usb_dev and wa->usb_iface initialized and refcounted,
+ * wa->wa_descr initialized.
+ */
+int wa_create(struct wahc *wa, struct usb_interface *iface,
+ kernel_ulong_t quirks)
+{
+ int result;
+ struct device *dev = &iface->dev;
+
+ if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
+ result = wa_rpipes_create(wa);
+ if (result < 0)
+ goto error_rpipes_create;
+ wa->quirks = quirks;
+ /* Fill up Data Transfer EP pointers */
+ wa->dti_epd = &iface->cur_altsetting->endpoint[1].desc;
+ wa->dto_epd = &iface->cur_altsetting->endpoint[2].desc;
+ wa->dti_buf_size = usb_endpoint_maxp(wa->dti_epd);
+ wa->dti_buf = kmalloc(wa->dti_buf_size, GFP_KERNEL);
+ if (wa->dti_buf == NULL) {
+ result = -ENOMEM;
+ goto error_dti_buf_alloc;
+ }
+ result = wa_nep_create(wa, iface);
+ if (result < 0) {
+ dev_err(dev, "WA-CDS: can't initialize notif endpoint: %d\n",
+ result);
+ goto error_nep_create;
+ }
+ return 0;
+
+error_nep_create:
+ kfree(wa->dti_buf);
+error_dti_buf_alloc:
+ wa_rpipes_destroy(wa);
+error_rpipes_create:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_create);
+
+
+void __wa_destroy(struct wahc *wa)
+{
+ if (wa->dti_urb) {
+ usb_kill_urb(wa->dti_urb);
+ usb_put_urb(wa->dti_urb);
+ }
+ kfree(wa->dti_buf);
+ wa_nep_destroy(wa);
+ wa_rpipes_destroy(wa);
+}
+EXPORT_SYMBOL_GPL(__wa_destroy);
+
+/**
+ * wa_reset_all - reset the WA device
+ * @wa: the WA to be reset
+ *
+ * For HWAs the radio controller and all other PALs are also reset.
+ */
+void wa_reset_all(struct wahc *wa)
+{
+ /* FIXME: assuming HWA. */
+ wusbhc_reset_all(wa->wusb);
+}
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB Wire Adapter core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/wa-hc.h b/drivers/staging/wusbcore/wa-hc.h
new file mode 100644
index 000000000000..5a38465724c2
--- /dev/null
+++ b/drivers/staging/wusbcore/wa-hc.h
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HWA Host Controller Driver
+ * Wire Adapter Control/Data Streaming Iface (WUSB1.0[8])
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This driver implements a USB Host Controller (struct usb_hcd) for a
+ * Wireless USB Host Controller based on the Wireless USB 1.0
+ * Host-Wire-Adapter specification (in layman terms, a USB-dongle that
+ * implements a Wireless USB host).
+ *
+ * Check out the Design-overview.txt file in the source documentation
+ * for other details on the implementation.
+ *
+ * Main blocks:
+ *
+ * driver glue with the driver API, workqueue daemon
+ *
+ * lc RC instance life cycle management (create, destroy...)
+ *
+ * hcd glue with the USB API Host Controller Interface API.
+ *
+ * nep Notification EndPoint management: collect notifications
+ * and queue them with the workqueue daemon.
+ *
+ * Handle notifications as coming from the NEP. Sends them
+ * off others to their respective modules (eg: connect,
+ * disconnect and reset go to devconnect).
+ *
+ * rpipe Remote Pipe management; rpipe is what we use to write
+ * to an endpoint on a WUSB device that is connected to a
+ * HWA RC.
+ *
+ * xfer Transfer management -- this is all the code that gets a
+ * buffer and pushes it to a device (or vice versa).
+ *
+ * Some day a lot of this code will be shared between this driver and
+ * the drivers for DWA (xfer, rpipe).
+ *
+ * It all starts at driver.c:hwahc_probe(), when one of these guys is
+ * connected. hwahc_disconnect() stops it.
+ *
+ * During operation, the main driver is devices connecting or
+ * disconnecting. They cause the HWA RC to send notifications into
+ * nep.c:hwahc_nep_cb() that will dispatch them to
+ * notif.c:wa_notif_dispatch(). From there they will fan to cause
+ * device connects, disconnects, etc.
+ *
+ * Note that much of the activity is difficult to follow. For example, a
+ * device connect goes to devconnect, which will cause the "fake" root
+ * hub port to show a connect and stop there. Then hub_wq will notice
+ * and call into the rh.c:hwahc_rc_port_reset() code to authenticate
+ * the device (and this might require user intervention) and enable
+ * the port.
+ *
+ * We also have a timer workqueue going from devconnect.c that
+ * schedules in hwahc_devconnect_create().
+ *
+ * The rest of the traffic is in the usual entry points of a USB HCD,
+ * which are hooked up in driver.c:hwahc_rc_driver, and defined in
+ * hcd.c.
+ */
+
+#ifndef __HWAHC_INTERNAL_H__
+#define __HWAHC_INTERNAL_H__
+
+#include <linux/completion.h>
+#include <linux/usb.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include "../uwb/uwb.h"
+#include "include/wusb.h"
+#include "include/wusb-wa.h"
+
+struct wusbhc;
+struct wahc;
+extern void wa_urb_enqueue_run(struct work_struct *ws);
+extern void wa_process_errored_transfers_run(struct work_struct *ws);
+
+/**
+ * RPipe instance
+ *
+ * @descr's fields are kept in LE, as we need to send it back and
+ * forth.
+ *
+ * @wa is referenced when set
+ *
+ * @segs_available is the number of request segments that still can
+ * be submitted to the controller without overloading
+ * it. It is initialized to descr->wRequests when
+ * aiming.
+ *
+ * An rpipe supports a max of descr->wRequests segments at the same
+ * time; before submitting, seg_lock has to be taken. If
+ * segs_available > 0, then we can submit; if not, we have to queue
+ * them.
+ */
+struct wa_rpipe {
+ struct kref refcnt;
+ struct usb_rpipe_descriptor descr;
+ struct usb_host_endpoint *ep;
+ struct wahc *wa;
+ spinlock_t seg_lock;
+ struct list_head seg_list;
+ struct list_head list_node;
+ atomic_t segs_available;
+ u8 buffer[1]; /* For reads/writes on USB */
+};
+
+
+enum wa_dti_state {
+ WA_DTI_TRANSFER_RESULT_PENDING,
+ WA_DTI_ISOC_PACKET_STATUS_PENDING,
+ WA_DTI_BUF_IN_DATA_PENDING
+};
+
+enum wa_quirks {
+ /*
+ * The Alereon HWA expects the data frames in isochronous transfer
+ * requests to be concatenated and not sent as separate packets.
+ */
+ WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC = 0x01,
+ /*
+ * The Alereon HWA can be instructed to not send transfer notifications
+ * as an optimization.
+ */
+ WUSB_QUIRK_ALEREON_HWA_DISABLE_XFER_NOTIFICATIONS = 0x02,
+};
+
+enum wa_vendor_specific_requests {
+ WA_REQ_ALEREON_DISABLE_XFER_NOTIFICATIONS = 0x4C,
+ WA_REQ_ALEREON_FEATURE_SET = 0x01,
+ WA_REQ_ALEREON_FEATURE_CLEAR = 0x00,
+};
+
+#define WA_MAX_BUF_IN_URBS 4
+/**
+ * Instance of a HWA Host Controller
+ *
+ * Except where a more specific lock/mutex applies or a field is
+ * atomic, all fields are protected by @mutex.
+ *
+ * @wa_descr Can be accessed without locking because it is in
+ * the same area where the device descriptors were
+ * read, so it is guaranteed to exist unmodified while
+ * the device exists.
+ *
+ * Endianness has been converted to the CPU's.
+ *
+ * @nep_* can be accessed without locking as its processing is
+ * serialized; we submit a NEP URB and it comes to
+ * hwahc_nep_cb(), which won't issue another URB until it is
+ * done processing it.
+ *
+ * @xfer_list:
+ *
+ * List of active transfers to verify existence from a xfer id
+ * gotten from the xfer result message. Can't use urb->list because
+ * it goes by endpoint, and we don't know the endpoint at the time
+ * when we get the xfer result message. We can't really rely on the
+ * pointer (will have to change for 64 bits) as the xfer id is 32 bits.
+ *
+ * @xfer_delayed_list: List of transfers that need to be started
+ * (with a workqueue, because they were
+ * submitted from an atomic context).
+ *
+ * FIXME: this needs to be layered up: a wusbhc layer (for sharing
+ * commonalities with WHCI), a wa layer (for sharing
+ * commonalities with DWA-RC).
+ */
+struct wahc {
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_iface;
+
+ /* HC to deliver notifications */
+ union {
+ struct wusbhc *wusb;
+ struct dwahc *dwa;
+ };
+
+ const struct usb_endpoint_descriptor *dto_epd, *dti_epd;
+ const struct usb_wa_descriptor *wa_descr;
+
+ struct urb *nep_urb; /* Notification EndPoint [lockless] */
+ struct edc nep_edc;
+ void *nep_buffer;
+ size_t nep_buffer_size;
+
+ atomic_t notifs_queued;
+
+ u16 rpipes;
+ unsigned long *rpipe_bm; /* rpipe usage bitmap */
+ struct list_head rpipe_delayed_list; /* delayed RPIPES. */
+ spinlock_t rpipe_lock; /* protect rpipe_bm and delayed list */
+ struct mutex rpipe_mutex; /* assigning resources to endpoints */
+
+ /*
+ * dti_state is used to track the state of the dti_urb. When dti_state
+ * is WA_DTI_ISOC_PACKET_STATUS_PENDING, dti_isoc_xfer_in_progress and
+ * dti_isoc_xfer_seg identify which xfer the incoming isoc packet
+ * status refers to.
+ */
+ enum wa_dti_state dti_state;
+ u32 dti_isoc_xfer_in_progress;
+ u8 dti_isoc_xfer_seg;
+ struct urb *dti_urb; /* URB for reading xfer results */
+ /* URBs for reading data in */
+ struct urb buf_in_urbs[WA_MAX_BUF_IN_URBS];
+ int active_buf_in_urbs; /* number of buf_in_urbs active. */
+ struct edc dti_edc; /* DTI error density counter */
+ void *dti_buf;
+ size_t dti_buf_size;
+
+ unsigned long dto_in_use; /* protect dto endpoint serialization */
+
+ s32 status; /* For reading status */
+
+ struct list_head xfer_list;
+ struct list_head xfer_delayed_list;
+ struct list_head xfer_errored_list;
+ /*
+ * lock for the above xfer lists. Can be taken while a xfer->lock is
+ * held but not in the reverse order.
+ */
+ spinlock_t xfer_list_lock;
+ struct work_struct xfer_enqueue_work;
+ struct work_struct xfer_error_work;
+ atomic_t xfer_id_count;
+
+ kernel_ulong_t quirks;
+};
+
+
+extern int wa_create(struct wahc *wa, struct usb_interface *iface,
+ kernel_ulong_t);
+extern void __wa_destroy(struct wahc *wa);
+extern int wa_dti_start(struct wahc *wa);
+void wa_reset_all(struct wahc *wa);
+
+
+/* Miscellaneous constants */
+enum {
+ /** Max number of EPROTO errors we tolerate on the NEP in a
+ * period of time */
+ HWAHC_EPROTO_MAX = 16,
+ /** Period of time for EPROTO errors (in jiffies) */
+ HWAHC_EPROTO_PERIOD = 4 * HZ,
+};
+
+
+/* Notification endpoint handling */
+extern int wa_nep_create(struct wahc *, struct usb_interface *);
+extern void wa_nep_destroy(struct wahc *);
+
+static inline int wa_nep_arm(struct wahc *wa, gfp_t gfp_mask)
+{
+ struct urb *urb = wa->nep_urb;
+ urb->transfer_buffer = wa->nep_buffer;
+ urb->transfer_buffer_length = wa->nep_buffer_size;
+ return usb_submit_urb(urb, gfp_mask);
+}
+
+static inline void wa_nep_disarm(struct wahc *wa)
+{
+ usb_kill_urb(wa->nep_urb);
+}
+
+
+/* RPipes */
+static inline void wa_rpipe_init(struct wahc *wa)
+{
+ INIT_LIST_HEAD(&wa->rpipe_delayed_list);
+ spin_lock_init(&wa->rpipe_lock);
+ mutex_init(&wa->rpipe_mutex);
+}
+
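+/*
+ * Initialize the software state of a wire adapter instance: error
+ * density counters, rpipe bookkeeping, transfer lists/locks, work
+ * items and the buf-in URBs.
+ */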
+static inline void wa_init(struct wahc *wa)
+{
+ int index;
+
+ edc_init(&wa->nep_edc);
+ atomic_set(&wa->notifs_queued, 0);
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+ wa_rpipe_init(wa);
+ edc_init(&wa->dti_edc);
+ INIT_LIST_HEAD(&wa->xfer_list);
+ INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ INIT_LIST_HEAD(&wa->xfer_errored_list);
+ spin_lock_init(&wa->xfer_list_lock);
+ INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
+ wa->dto_in_use = 0;
+ atomic_set(&wa->xfer_id_count, 1);
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index)
+ usb_init_urb(&(wa->buf_in_urbs[index]));
+ wa->active_buf_in_urbs = 0;
+}
+
+/**
+ * Destroy a pipe (when refcount drops to zero)
+ *
+ * Assumes it has been moved to the "QUIESCING" state.
+ */
+struct wa_xfer;
+extern void rpipe_destroy(struct kref *_rpipe);
+static inline
+void __rpipe_get(struct wa_rpipe *rpipe)
+{
+ kref_get(&rpipe->refcnt);
+}
+extern int rpipe_get_by_ep(struct wahc *, struct usb_host_endpoint *,
+ struct urb *, gfp_t);
+static inline void rpipe_put(struct wa_rpipe *rpipe)
+{
+ kref_put(&rpipe->refcnt, rpipe_destroy);
+}
+extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern void rpipe_clear_feature_stalled(struct wahc *,
+ struct usb_host_endpoint *);
+extern int wa_rpipes_create(struct wahc *);
+extern void wa_rpipes_destroy(struct wahc *);
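+/* Account for one more segment submitted to this rpipe. */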
+static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
+{
+ atomic_dec(&rpipe->segs_available);
+}
+
+/**
+ * Returns true if the rpipe is ready to submit more segments.
+ */
+static inline int rpipe_avail_inc(struct wa_rpipe *rpipe)
+{
+ return atomic_inc_return(&rpipe->segs_available) > 0
+ && !list_empty(&rpipe->seg_list);
+}
+
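+/*
+ * Illustrative sketch only: the submit-or-queue pattern the transfer
+ * code in wa-xfer.c is expected to follow when consuming
+ * @segs_available (the name of the submit helper below is
+ * hypothetical):
+ *
+ *	spin_lock_irqsave(&rpipe->seg_lock, flags);
+ *	if (atomic_read(&rpipe->segs_available) > 0 &&
+ *	    list_empty(&rpipe->seg_list)) {
+ *		rpipe_avail_dec(rpipe);
+ *		submit_segment(seg);	-- hypothetical helper
+ *	} else {
+ *		list_add_tail(&seg->list_node, &rpipe->seg_list);
+ *	}
+ *	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+ */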
+
+/* Transferring data */
+extern int wa_urb_enqueue(struct wahc *, struct usb_host_endpoint *,
+ struct urb *, gfp_t);
+extern int wa_urb_dequeue(struct wahc *, struct urb *, int);
+extern void wa_handle_notif_xfer(struct wahc *, struct wa_notif_hdr *);
+
+
+/* Misc
+ *
+ * FIXME: Refcounting for the actual @hwahc object is not correct; I
+ * mean, this should be refcounting on the HCD underneath, but
+ * it is not. In any case, the semantics for HCD refcounting
+ * are *weird*...on refcount reaching zero it just frees
+ * it...no RC specific function is called...unless I miss
+ * something.
+ *
+ * FIXME: has to go away in favour of a 'struct' hcd based solution
+ */
+static inline struct wahc *wa_get(struct wahc *wa)
+{
+ usb_get_intf(wa->usb_iface);
+ return wa;
+}
+
+static inline void wa_put(struct wahc *wa)
+{
+ usb_put_intf(wa->usb_iface);
+}
+
+
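+/*
+ * Send a class-specific SET_FEATURE (@op != 0) or CLEAR_FEATURE
+ * (@op == 0) request for @feature to the wire adapter interface.
+ */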
+static inline int __wa_feature(struct wahc *wa, unsigned op, u16 feature)
+{
+ return usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ op ? USB_REQ_SET_FEATURE : USB_REQ_CLEAR_FEATURE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ feature,
+ wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+
+static inline int __wa_set_feature(struct wahc *wa, u16 feature)
+{
+ return __wa_feature(wa, 1, feature);
+}
+
+
+static inline int __wa_clear_feature(struct wahc *wa, u16 feature)
+{
+ return __wa_feature(wa, 0, feature);
+}
+
+
+/**
+ * Return the status of a Wire Adapter
+ *
+ * @wa: Wire Adapter instance
+ * @returns < 0 errno code on error, or status bitmap as described
+ * in WUSB1.0[8.3.1.6].
+ *
+ * NOTE: the status buffer must be kmalloc'ed (here it lives inside
+ * struct wahc); some arches cannot do USB transfers from stack memory.
+ */
+static inline
+s32 __wa_get_status(struct wahc *wa)
+{
+ s32 result;
+ result = usb_control_msg(
+ wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ USB_REQ_GET_STATUS,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ 0, wa->usb_iface->cur_altsetting->desc.bInterfaceNumber,
+ &wa->status, sizeof(wa->status), USB_CTRL_GET_TIMEOUT);
+ if (result >= 0)
+ result = wa->status;
+ return result;
+}
+
+
+/**
+ * Waits until the Wire Adapter's status matches @mask/@value
+ *
+ * @wa: Wire Adapter instance.
+ * @returns < 0 errno code on error, otherwise status.
+ *
+ * Loop until the WA's status matches the mask and value (status & mask
+ * == value), timing out if it doesn't happen.
+ *
+ * FIXME: is there an official specification on how long status
+ * changes can take?
+ */
+static inline s32 __wa_wait_status(struct wahc *wa, u32 mask, u32 value)
+{
+ s32 result;
+ unsigned loops = 10;
+ do {
+ msleep(50);
+ result = __wa_get_status(wa);
+ if ((result & mask) == value)
+ break;
+ if (loops-- == 0) {
+ result = -ETIMEDOUT;
+ break;
+ }
+ } while (result >= 0);
+ return result;
+}
+
+
+/** Command @wa to stop; errors are logged, but 0 is always returned. */
+static inline int __wa_stop(struct wahc *wa)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+
+ result = __wa_clear_feature(wa, WA_ENABLE);
+ if (result < 0 && result != -ENODEV) {
+ dev_err(dev, "error commanding HC to stop: %d\n", result);
+ goto out;
+ }
+ result = __wa_wait_status(wa, WA_ENABLE, 0);
+ if (result < 0 && result != -ENODEV)
+ dev_err(dev, "error waiting for HC to stop: %d\n", result);
+out:
+ return 0;
+}
+
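+/*
+ * Illustrative sketch only (nothing in this header uses it): a
+ * hypothetical __wa_start() counterpart to __wa_stop() would set the
+ * WA_ENABLE feature and then poll until the status reflects it:
+ *
+ *	result = __wa_set_feature(wa, WA_ENABLE);
+ *	if (result >= 0)
+ *		result = __wa_wait_status(wa, WA_ENABLE, WA_ENABLE);
+ */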
+
+#endif /* #ifndef __HWAHC_INTERNAL_H__ */
diff --git a/drivers/staging/wusbcore/wa-nep.c b/drivers/staging/wusbcore/wa-nep.c
new file mode 100644
index 000000000000..5f0656db5482
--- /dev/null
+++ b/drivers/staging/wusbcore/wa-nep.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8])
+ * Notification EndPoint support
+ *
+ * Copyright (C) 2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This part only takes care of getting the notification from the hw
+ * and dispatching it through the wusbd workqueue into
+ * wa_notif_dispatch(). Handling is done there.
+ *
+ * WA notifications are limited in size; most of them are three or
+ * four bytes long, and the longest is the HWA Device Notification,
+ * which would not exceed 38 bytes (DNs are limited in payload to 32
+ * bytes plus a 3 byte header (WUSB1.0[7.6p2]), plus a 3 byte HWA
+ * header (WUSB1.0[8.5.4.2])).
+ *
+ * It is not clear if more than one Device Notification can be packed
+ * in a HWA Notification; I assume not because of the wording in
+ * WUSB1.0[8.5.4.2]. In any case, the biggest any notification could
+ * get is 256 bytes (as the bLength field is a byte).
+ *
+ * So what we do is we have this buffer and read into it; when a
+ * notification arrives we schedule work to a specific, single thread
+ * workqueue (so notifications are serialized) and copy the
+ * notification data. After scheduling the work, we rearm the read from
+ * the notification endpoint.
+ *
+ * Entry points here are:
+ *
+ * wa_nep_[create|destroy]() To initialize/release this subsystem
+ *
+ * wa_nep_cb() Callback for the notification
+ * endpoint; when data is ready, this
+ * does the dispatching.
+ */
+#include <linux/workqueue.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+
+#include "wa-hc.h"
+#include "wusbhc.h"
+
+/* Structure for queueing notifications to the workqueue */
+struct wa_notif_work {
+ struct work_struct work;
+ struct wahc *wa;
+ size_t size;
+ u8 data[];
+};
+
+/*
+ * Process incoming notifications from the WA's Notification EndPoint
+ * [the wusbd workqueue, basically]
+ *
+ * @ws: Pointer to the work_struct embedded in a 'struct
+ * wa_notif_work', which carries the pointer to the @wa, the size
+ * of the buffer and the notification data (so we can free it all
+ * when done).
+ *
+ * All notifications follow the same format; they need to start with a
+ * 'struct wa_notif_hdr' header, so it is easy to parse through
+ * them. We just break the buffer in individual notifications (the
+ * standard doesn't say if it can be done or is forbidden, so we are
+ * cautious) and dispatch each.
+ *
+ * So the handling layers are:
+ *
+ * WA specific notification (from NEP)
+ * Device Notification Received -> wa_handle_notif_dn()
+ * WUSB Device notification generic handling
+ * BPST Adjustment -> wa_handle_notif_bpst_adj()
+ * ... -> ...
+ *
+ * @wa has to be referenced
+ */
+static void wa_notif_dispatch(struct work_struct *ws)
+{
+ void *itr;
+ u8 missing = 0;
+ struct wa_notif_work *nw = container_of(ws, struct wa_notif_work,
+ work);
+ struct wahc *wa = nw->wa;
+ struct wa_notif_hdr *notif_hdr;
+ size_t size;
+
+ struct device *dev = &wa->usb_iface->dev;
+
+#if 0
+ /* FIXME: need to check for this??? */
+ if (usb_hcd->state == HC_STATE_QUIESCING) /* Going down? */
+ goto out; /* screw it */
+#endif
+ atomic_dec(&wa->notifs_queued); /* Throttling ctl */
+ size = nw->size;
+ itr = nw->data;
+
+ while (size) {
+ if (size < sizeof(*notif_hdr)) {
+ missing = sizeof(*notif_hdr) - size;
+ goto exhausted_buffer;
+ }
+ notif_hdr = itr;
+ if (size < notif_hdr->bLength)
+ goto exhausted_buffer;
+ itr += notif_hdr->bLength;
+ size -= notif_hdr->bLength;
+ /* Dispatch the notification [don't use itr or size!] */
+ switch (notif_hdr->bNotifyType) {
+ case HWA_NOTIF_DN: {
+ struct hwa_notif_dn *hwa_dn;
+ hwa_dn = container_of(notif_hdr, struct hwa_notif_dn,
+ hdr);
+ wusbhc_handle_dn(wa->wusb, hwa_dn->bSourceDeviceAddr,
+ hwa_dn->dndata,
+ notif_hdr->bLength - sizeof(*hwa_dn));
+ break;
+ }
+ case WA_NOTIF_TRANSFER:
+ wa_handle_notif_xfer(wa, notif_hdr);
+ break;
+ case HWA_NOTIF_BPST_ADJ:
+ break; /* no action needed for BPST ADJ. */
+ case DWA_NOTIF_RWAKE:
+ case DWA_NOTIF_PORTSTATUS:
+ /* FIXME: unimplemented WA NOTIFs */
+ /* fallthru */
+ default:
+ dev_err(dev, "HWA: unknown notification 0x%x, "
+ "%zu bytes; discarding\n",
+ notif_hdr->bNotifyType,
+ (size_t)notif_hdr->bLength);
+ break;
+ }
+ }
+out:
+ wa_put(wa);
+ kfree(nw);
+ return;
+
+ /* THIS SHOULD NOT HAPPEN
+ *
+ * Buffer exhausted with partial data remaining; just warn and
+ * discard the data, as this should not happen.
+ */
+exhausted_buffer:
+ dev_warn(dev, "HWA: device sent short notification, "
+ "%d bytes missing; discarding %d bytes.\n",
+ missing, (int)size);
+ goto out;
+}
+
+/*
+ * Deliver incoming WA notifications to the wusbwa workqueue
+ *
+ * @wa: Pointer to the Wire Adapter Controller Data Streaming
+ * instance (part of an 'struct usb_hcd').
+ * @size: Size of the received buffer
+ * @returns 0 if ok, < 0 errno code on error.
+ *
+ * The input buffer is @wa->nep_buffer, with @size bytes
+ * (guaranteed to fit in the allocated space,
+ * @wa->nep_buffer_size).
+ */
+static int wa_nep_queue(struct wahc *wa, size_t size)
+{
+ int result = 0;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_notif_work *nw;
+
+ /* dev_fnstart(dev, "(wa %p, size %zu)\n", wa, size); */
+ BUG_ON(size > wa->nep_buffer_size);
+ if (size == 0)
+ goto out;
+ if (atomic_read(&wa->notifs_queued) > 200) {
+ if (printk_ratelimit())
+ dev_err(dev, "Too many notifications queued, "
+ "throttling back\n");
+ goto out;
+ }
+ nw = kzalloc(sizeof(*nw) + size, GFP_ATOMIC);
+ if (nw == NULL) {
+ if (printk_ratelimit())
+ dev_err(dev, "No memory to queue notification\n");
+ result = -ENOMEM;
+ goto out;
+ }
+ INIT_WORK(&nw->work, wa_notif_dispatch);
+ nw->wa = wa_get(wa);
+ nw->size = size;
+ memcpy(nw->data, wa->nep_buffer, size);
+ atomic_inc(&wa->notifs_queued); /* Throttling ctl */
+ queue_work(wusbd, &nw->work);
+out:
+ /* dev_fnend(dev, "(wa %p, size %zu) = result\n", wa, size, result); */
+ return result;
+}
+
+/*
+ * Callback for the notification event endpoint
+ *
+ * Checks that everything is fine and then passes the data to be
+ * queued to the workqueue.
+ */
+static void wa_nep_cb(struct urb *urb)
+{
+ int result;
+ struct wahc *wa = urb->context;
+ struct device *dev = &wa->usb_iface->dev;
+
+ switch (result = urb->status) {
+ case 0:
+ result = wa_nep_queue(wa, urb->actual_length);
+ if (result < 0)
+ dev_err(dev, "NEP: unable to process notification(s): "
+ "%d\n", result);
+ break;
+ case -ECONNRESET: /* Not an error, but a controlled situation; */
+ case -ENOENT: /* (we killed the URB)...so, no broadcast */
+ case -ESHUTDOWN:
+ dev_dbg(dev, "NEP: going down %d\n", urb->status);
+ goto out;
+ default: /* On general errors, we retry unless it gets ugly */
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "NEP: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ goto out;
+ }
+ dev_err(dev, "NEP: URB error %d\n", urb->status);
+ }
+ result = wa_nep_arm(wa, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "NEP: cannot submit URB: %d\n", result);
+ wa_reset_all(wa);
+ }
+out:
+ return;
+}
+
+/*
+ * Initialize @wa's notification and event's endpoint stuff
+ *
+ * This includes allocating the read buffer, the context ID
+ * allocation bitmap, the URB and submitting the URB.
+ */
+int wa_nep_create(struct wahc *wa, struct usb_interface *iface)
+{
+ int result;
+ struct usb_endpoint_descriptor *epd;
+ struct usb_device *usb_dev = interface_to_usbdev(iface);
+ struct device *dev = &iface->dev;
+
+ edc_init(&wa->nep_edc);
+ epd = &iface->cur_altsetting->endpoint[0].desc;
+ wa->nep_buffer_size = 1024;
+ result = -ENOMEM;
+ wa->nep_buffer = kmalloc(wa->nep_buffer_size, GFP_KERNEL);
+ if (!wa->nep_buffer)
+ goto error_nep_buffer;
+ wa->nep_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->nep_urb == NULL)
+ goto error_urb_alloc;
+ usb_fill_int_urb(wa->nep_urb, usb_dev,
+ usb_rcvintpipe(usb_dev, epd->bEndpointAddress),
+ wa->nep_buffer, wa->nep_buffer_size,
+ wa_nep_cb, wa, epd->bInterval);
+ result = wa_nep_arm(wa, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "Cannot submit notification URB: %d\n", result);
+ goto error_nep_arm;
+ }
+ return 0;
+
+error_nep_arm:
+ usb_free_urb(wa->nep_urb);
+error_urb_alloc:
+ kfree(wa->nep_buffer);
+error_nep_buffer:
+ return result;
+}
+
+void wa_nep_destroy(struct wahc *wa)
+{
+ wa_nep_disarm(wa);
+ usb_free_urb(wa->nep_urb);
+ kfree(wa->nep_buffer);
+}
diff --git a/drivers/staging/wusbcore/wa-rpipe.c b/drivers/staging/wusbcore/wa-rpipe.c
new file mode 100644
index 000000000000..a5734cbcd5ad
--- /dev/null
+++ b/drivers/staging/wusbcore/wa-rpipe.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WUSB Wire Adapter
+ * rpipe management
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * FIXME: docs
+ *
+ * RPIPE
+ *
+ * Targeted at different downstream endpoints
+ *
+ * Descriptor: use to config the remote pipe.
+ *
+ * The number of blocks could be dynamic (wBlocks in descriptor is
+ * 0)--need to schedule them then.
+ *
+ * Each bit in wa->rpipe_bm represents if an rpipe is being used or
+ * not. Rpipes are represented with a 'struct wa_rpipe' that is
+ * attached to the hcpriv member of a 'struct usb_host_endpoint'.
+ *
+ * When you need to xfer data to an endpoint, you get an rpipe for it
+ * with rpipe_get_by_ep(), which gives you a reference to the rpipe
+ * and keeps a single one (the first one) with the endpoint. When you
+ * are done transferring, you drop that reference. At the end the
+ * rpipe is always allocated and bound to the endpoint. There it might
+ * be recycled when not used.
+ *
+ * Addresses:
+ *
+ * We use a 1:1 mapping between the port index (0 based) and the
+ * device address. The USB stack knows about this.
+ *
+ * USB Stack port number 4 (1 based)
+ * WUSB code port index 3 (0 based)
+ * USB Address 5 (2 based -- 0 is for default, 1 for root hub)
+ *
+ * Now, because we don't use the concept of a default address exactly
+ * like the (wired) USB code does, we need to kind of skip it. So we
+ * never take addresses from the urb->pipe, but from the
+ * urb->dev->devnum, to make sure that we always have the right
+ * destination address.
+ */
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "wusbhc.h"
+#include "wa-hc.h"
+
+static int __rpipe_get_descr(struct wahc *wa,
+ struct usb_rpipe_descriptor *descr, u16 index)
+{
+ ssize_t result;
+ struct device *dev = &wa->usb_iface->dev;
+
+ /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor()
+ * function because the arguments are different.
+ */
+ result = usb_control_msg(
+ wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ USB_REQ_GET_DESCRIPTOR,
+ USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
+ USB_CTRL_GET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "rpipe %u: get descriptor failed: %d\n",
+ index, (int)result);
+ goto error;
+ }
+ if (result < sizeof(*descr)) {
+ dev_err(dev, "rpipe %u: got short descriptor "
+ "(%zd vs %zd bytes needed)\n",
+ index, result, sizeof(*descr));
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+
+error:
+ return result;
+}
+
+/*
+ *
+ * The descriptor is assumed to be properly initialized (ie: you got
+ * it through __rpipe_get_descr()).
+ */
+static int __rpipe_set_descr(struct wahc *wa,
+ struct usb_rpipe_descriptor *descr, u16 index)
+{
+ ssize_t result;
+ struct device *dev = &wa->usb_iface->dev;
+
+ /* we cannot use the usb_get_descriptor() function because the
+ * arguments are different.
+ */
+ result = usb_control_msg(
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_SET_DESCRIPTOR,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ USB_DT_RPIPE<<8, index, descr, sizeof(*descr),
+ USB_CTRL_SET_TIMEOUT);
+ if (result < 0) {
+ dev_err(dev, "rpipe %u: set descriptor failed: %d\n",
+ index, (int)result);
+ goto error;
+ }
+ if (result < sizeof(*descr)) {
+ dev_err(dev, "rpipe %u: sent short descriptor "
+ "(%zd vs %zd bytes required)\n",
+ index, result, sizeof(*descr));
+ result = -EINVAL;
+ goto error;
+ }
+ result = 0;
+
+error:
+ return result;
+
+}
+
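+/* Initialize the in-memory bookkeeping of a freshly allocated rpipe. */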
+static void rpipe_init(struct wa_rpipe *rpipe)
+{
+ kref_init(&rpipe->refcnt);
+ spin_lock_init(&rpipe->seg_lock);
+ INIT_LIST_HEAD(&rpipe->seg_list);
+ INIT_LIST_HEAD(&rpipe->list_node);
+}
+
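+/*
+ * Claim the first unused rpipe index at or after @rpipe_idx in the
+ * usage bitmap; returns wa->rpipes if none is available.
+ */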
+static unsigned rpipe_get_idx(struct wahc *wa, unsigned rpipe_idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
+ rpipe_idx = find_next_zero_bit(wa->rpipe_bm, wa->rpipes, rpipe_idx);
+ if (rpipe_idx < wa->rpipes)
+ set_bit(rpipe_idx, wa->rpipe_bm);
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+
+ return rpipe_idx;
+}
+
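+/* Release a previously claimed rpipe index back to the usage bitmap. */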
+static void rpipe_put_idx(struct wahc *wa, unsigned rpipe_idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
+ clear_bit(rpipe_idx, wa->rpipe_bm);
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+void rpipe_destroy(struct kref *_rpipe)
+{
+ struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt);
+ u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+ if (rpipe->ep)
+ rpipe->ep->hcpriv = NULL;
+ rpipe_put_idx(rpipe->wa, index);
+ wa_put(rpipe->wa);
+ kfree(rpipe);
+}
+EXPORT_SYMBOL_GPL(rpipe_destroy);
+
+/*
+ * Locate an idle rpipe, create a structure for it and return it
+ *
+ * @wa is referenced and unlocked
+ * @crs enum rpipe_attr, required endpoint characteristics
+ *
+ * The rpipe can be used only sequentially (not in parallel).
+ *
+ * The rpipe is moved into the "ready" state.
+ */
+static int rpipe_get_idle(struct wa_rpipe **prpipe, struct wahc *wa, u8 crs,
+ gfp_t gfp)
+{
+ int result;
+ unsigned rpipe_idx;
+ struct wa_rpipe *rpipe;
+ struct device *dev = &wa->usb_iface->dev;
+
+ rpipe = kzalloc(sizeof(*rpipe), gfp);
+ if (rpipe == NULL)
+ return -ENOMEM;
+ rpipe_init(rpipe);
+
+ /* Look for an idle pipe */
+ for (rpipe_idx = 0; rpipe_idx < wa->rpipes; rpipe_idx++) {
+ rpipe_idx = rpipe_get_idx(wa, rpipe_idx);
+ if (rpipe_idx >= wa->rpipes) /* no more pipes :( */
+ break;
+ result = __rpipe_get_descr(wa, &rpipe->descr, rpipe_idx);
+ if (result < 0)
+ dev_err(dev, "Can't get descriptor for rpipe %u: %d\n",
+ rpipe_idx, result);
+ else if ((rpipe->descr.bmCharacteristics & crs) != 0)
+ goto found;
+ rpipe_put_idx(wa, rpipe_idx);
+ }
+ *prpipe = NULL;
+ kfree(rpipe);
+ return -ENXIO;
+
+found:
+ set_bit(rpipe_idx, wa->rpipe_bm);
+ rpipe->wa = wa_get(wa);
+ *prpipe = rpipe;
+ return 0;
+}
+
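+/* Send an RPIPE_RESET request so rpipe @index returns to its initial state. */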
+static int __rpipe_reset(struct wahc *wa, unsigned index)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+
+ result = usb_control_msg(
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_RPIPE_RESET,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ if (result < 0)
+ dev_err(dev, "rpipe %u: reset failed: %d\n",
+ index, result);
+ return result;
+}
+
+/*
+ * Fake companion descriptor for ep0
+ *
+ * See WUSB1.0[7.4.4], most of this is zero for bulk/int/ctl
+ */
+static struct usb_wireless_ep_comp_descriptor epc0 = {
+ .bLength = sizeof(epc0),
+ .bDescriptorType = USB_DT_WIRELESS_ENDPOINT_COMP,
+ .bMaxBurst = 1,
+ .bMaxSequence = 2,
+};
+
+/*
+ * Look for EP companion descriptor
+ *
+ * Walk the endpoint's extra descriptors looking for the wireless
+ * endpoint companion descriptor.
+ */
+static struct usb_wireless_ep_comp_descriptor *rpipe_epc_find(
+ struct device *dev, struct usb_host_endpoint *ep)
+{
+ void *itr;
+ size_t itr_size;
+ struct usb_descriptor_header *hdr;
+ struct usb_wireless_ep_comp_descriptor *epcd;
+
+ if (ep->desc.bEndpointAddress == 0) {
+ epcd = &epc0;
+ goto out;
+ }
+ itr = ep->extra;
+ itr_size = ep->extralen;
+ epcd = NULL;
+ while (itr_size > 0) {
+ if (itr_size < sizeof(*hdr)) {
+ dev_err(dev, "HW Bug? ep 0x%02x: extra descriptors "
+ "at offset %zu: only %zu bytes left\n",
+ ep->desc.bEndpointAddress,
+ itr - (void *) ep->extra, itr_size);
+ break;
+ }
+ hdr = itr;
+ if (hdr->bDescriptorType == USB_DT_WIRELESS_ENDPOINT_COMP) {
+ epcd = itr;
+ break;
+ }
+ if (hdr->bLength > itr_size) {
+ dev_err(dev, "HW Bug? ep 0x%02x: extra descriptor "
+ "at offset %zu (type 0x%02x) "
+ "length %d but only %zu bytes left\n",
+ ep->desc.bEndpointAddress,
+ itr - (void *) ep->extra, hdr->bDescriptorType,
+ hdr->bLength, itr_size);
+ break;
+ }
+ itr += hdr->bLength;
+ itr_size -= hdr->bLength;
+ }
+out:
+ return epcd;
+}
+
+/*
+ * Aim an rpipe to its device & endpoint destination
+ *
+ * Make sure we change the address to unauthenticated if the device
+ * is WUSB and it is not authenticated.
+ */
+static int rpipe_aim(struct wa_rpipe *rpipe, struct wahc *wa,
+ struct usb_host_endpoint *ep, struct urb *urb, gfp_t gfp)
+{
+ int result = -ENOMSG; /* better code for lack of companion? */
+ struct device *dev = &wa->usb_iface->dev;
+ struct usb_device *usb_dev = urb->dev;
+ struct usb_wireless_ep_comp_descriptor *epcd;
+ u32 ack_window, epcd_max_sequence;
+ u8 unauth;
+
+ epcd = rpipe_epc_find(dev, ep);
+ if (epcd == NULL) {
+ dev_err(dev, "ep 0x%02x: can't find companion descriptor\n",
+ ep->desc.bEndpointAddress);
+ goto error;
+ }
+ unauth = usb_dev->wusb && !usb_dev->authenticated ? 0x80 : 0;
+ __rpipe_reset(wa, le16_to_cpu(rpipe->descr.wRPipeIndex));
+ atomic_set(&rpipe->segs_available,
+ le16_to_cpu(rpipe->descr.wRequests));
+ /* FIXME: block allocation system; request with queuing and timeout */
+ /* FIXME: compute so seg_size > ep->maxpktsize */
+ rpipe->descr.wBlocks = cpu_to_le16(16); /* given */
+ /* ep0 maxpktsize is 0x200 (WUSB1.0[4.8.1]) */
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ rpipe->descr.wMaxPacketSize = epcd->wOverTheAirPacketSize;
+ else
+ rpipe->descr.wMaxPacketSize = ep->desc.wMaxPacketSize;
+
+ rpipe->descr.hwa_bMaxBurst = max(min_t(unsigned int,
+ epcd->bMaxBurst, 16U), 1U);
+ rpipe->descr.hwa_bDeviceInfoIndex =
+ wusb_port_no_to_idx(urb->dev->portnum);
+ /* FIXME: use maximum speed as supported or recommended by device */
+ rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ?
+ UWB_PHY_RATE_53 : UWB_PHY_RATE_200;
+
+ dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n",
+ urb->dev->devnum, urb->dev->devnum | unauth,
+ le16_to_cpu(rpipe->descr.wRPipeIndex),
+ usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed);
+
+ rpipe->descr.hwa_reserved = 0;
+
+ rpipe->descr.bEndpointAddress = ep->desc.bEndpointAddress;
+ /* FIXME: bDataSequence */
+ rpipe->descr.bDataSequence = 0;
+
+ /* start with base window of hwa_bMaxBurst bits starting at 0. */
+ ack_window = 0xFFFFFFFF >> (32 - rpipe->descr.hwa_bMaxBurst);
+ rpipe->descr.dwCurrentWindow = cpu_to_le32(ack_window);
+ epcd_max_sequence = max(min_t(unsigned int,
+ epcd->bMaxSequence, 32U), 2U);
+ rpipe->descr.bMaxDataSequence = epcd_max_sequence - 1;
+ rpipe->descr.bInterval = ep->desc.bInterval;
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ rpipe->descr.bOverTheAirInterval = epcd->bOverTheAirInterval;
+ else
+ rpipe->descr.bOverTheAirInterval = 0; /* 0 if not isoc */
+ /* FIXME: xmit power & preamble blah blah */
+ rpipe->descr.bmAttribute = (ep->desc.bmAttributes &
+ USB_ENDPOINT_XFERTYPE_MASK);
+ /* rpipe->descr.bmCharacteristics RO */
+ rpipe->descr.bmRetryOptions = (wa->wusb->retry_count & 0xF);
+ /* FIXME: use for assessing link quality? */
+ rpipe->descr.wNumTransactionErrors = 0;
+ result = __rpipe_set_descr(wa, &rpipe->descr,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ if (result < 0) {
+ dev_err(dev, "Cannot aim rpipe: %d\n", result);
+ goto error;
+ }
+ result = 0;
+error:
+ return result;
+}
+
+/*
+ * Check an aimed rpipe to make sure it points to where we want
+ *
+ * We use bit 19 of the Linux USB pipe bitmap for unauth vs auth
+ * space; when it is set, we OR 0x80 into the address to make it an
+ * unauthenticated address.
+ */
+static int rpipe_check_aim(const struct wa_rpipe *rpipe, const struct wahc *wa,
+ const struct usb_host_endpoint *ep,
+ const struct urb *urb, gfp_t gfp)
+{
+ int result = 0;
+ struct device *dev = &wa->usb_iface->dev;
+ u8 portnum = wusb_port_no_to_idx(urb->dev->portnum);
+
+#define AIM_CHECK(rdf, val, text) \
+ do { \
+ if (rpipe->descr.rdf != (val)) { \
+ dev_err(dev, \
+ "rpipe aim discrepancy: " #rdf " " text "\n", \
+ rpipe->descr.rdf, (val)); \
+ result = -EINVAL; \
+ WARN_ON(1); \
+ } \
+ } while (0)
+ AIM_CHECK(hwa_bDeviceInfoIndex, portnum, "(%u vs %u)");
+ AIM_CHECK(bSpeed, usb_pipeendpoint(urb->pipe) == 0 ?
+ UWB_PHY_RATE_53 : UWB_PHY_RATE_200,
+ "(%u vs %u)");
+ AIM_CHECK(bEndpointAddress, ep->desc.bEndpointAddress, "(%u vs %u)");
+ AIM_CHECK(bInterval, ep->desc.bInterval, "(%u vs %u)");
+ AIM_CHECK(bmAttribute, ep->desc.bmAttributes & 0x03, "(%u vs %u)");
+#undef AIM_CHECK
+ return result;
+}
+
+#ifndef CONFIG_BUG
+#define CONFIG_BUG 0
+#endif
+
+/*
+ * Make sure there is an rpipe allocated for an endpoint
+ *
+ * If already allocated, we just refcount it; if not, we get an
+ * idle one, aim it to the right location and take it.
+ *
+ * Attaches to ep->hcpriv and rpipe->ep to ep.
+ */
+int rpipe_get_by_ep(struct wahc *wa, struct usb_host_endpoint *ep,
+ struct urb *urb, gfp_t gfp)
+{
+ int result = 0;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_rpipe *rpipe;
+ u8 eptype;
+
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ if (CONFIG_BUG == 1) {
+ result = rpipe_check_aim(rpipe, wa, ep, urb, gfp);
+ if (result < 0)
+ goto error;
+ }
+ __rpipe_get(rpipe);
+ dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n",
+ ep->desc.bEndpointAddress,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ } else {
+ /* hmm, assign idle rpipe, aim it */
+ result = -ENOBUFS;
+ eptype = ep->desc.bmAttributes & 0x03;
+ result = rpipe_get_idle(&rpipe, wa, 1 << eptype, gfp);
+ if (result < 0)
+ goto error;
+ result = rpipe_aim(rpipe, wa, ep, urb, gfp);
+ if (result < 0) {
+ rpipe_put(rpipe);
+ goto error;
+ }
+ ep->hcpriv = rpipe;
+ rpipe->ep = ep;
+ __rpipe_get(rpipe); /* for caching into ep->hcpriv */
+ dev_dbg(dev, "ep 0x%02x: using rpipe %u\n",
+ ep->desc.bEndpointAddress,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ }
+error:
+ mutex_unlock(&wa->rpipe_mutex);
+ return result;
+}
+
+/*
+ * Allocate the bitmap for each rpipe.
+ */
+int wa_rpipes_create(struct wahc *wa)
+{
+ wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
+ wa->rpipe_bm = bitmap_zalloc(wa->rpipes, GFP_KERNEL);
+ if (wa->rpipe_bm == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void wa_rpipes_destroy(struct wahc *wa)
+{
+ struct device *dev = &wa->usb_iface->dev;
+
+ if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) {
+ WARN_ON(1);
+ dev_err(dev, "BUG: pipes not released on exit: %*pb\n",
+ wa->rpipes, wa->rpipe_bm);
+ }
+ bitmap_free(wa->rpipe_bm);
+}
+
+/*
+ * Release resources allocated for an endpoint
+ *
+ * If there is an rpipe associated with this endpoint, abort any pending
+ * transfers and put it. If the rpipe ends up being destroyed,
+ * rpipe_destroy() will clean up ep->hcpriv.
+ *
+ * This is called before calling hcd->stop(), so you don't need to do
+ * anything else in there.
+ */
+void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+ struct wa_rpipe *rpipe;
+
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+ usb_control_msg(
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_RPIPE_ABORT,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ 0, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ rpipe_put(rpipe);
+ }
+ mutex_unlock(&wa->rpipe_mutex);
+}
+EXPORT_SYMBOL_GPL(rpipe_ep_disable);
+
+/* Clear the stalled status of an RPIPE. */
+void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+ struct wa_rpipe *rpipe;
+
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+ usb_control_msg(
+ wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ RPIPE_STALL, index, NULL, 0, USB_CTRL_SET_TIMEOUT);
+ }
+ mutex_unlock(&wa->rpipe_mutex);
+}
+EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
diff --git a/drivers/staging/wusbcore/wa-xfer.c b/drivers/staging/wusbcore/wa-xfer.c
new file mode 100644
index 000000000000..abf88cea37bb
--- /dev/null
+++ b/drivers/staging/wusbcore/wa-xfer.c
@@ -0,0 +1,2927 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * WUSB Wire Adapter
+ * Data transfer and URB enqueuing
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * How transfers work: get a buffer, break it up in segments (segment
+ * size is a multiple of the maxpacket size). For each segment issue a
+ * segment request (struct wa_xfer_*), then send the data buffer if
+ * out or nothing if in (all over the DTO endpoint).
+ *
+ * For each submitted segment request, a notification will come over
+ * the NEP endpoint and a transfer result (struct xfer_result) will
+ * arrive in the DTI URB. Read it, get the xfer ID, see if there is
+ * data coming (inbound transfer), schedule a read and handle it.
+ *
+ * Sounds simple, it is a pain to implement.
+ *
+ *
+ * ENTRY POINTS
+ *
+ * FIXME
+ *
+ * LIFE CYCLE / STATE DIAGRAM
+ *
+ * FIXME
+ *
+ * THIS CODE IS DISGUSTING
+ *
+ * You have been warned; it's my second try and I'm still not happy with it.
+ *
+ * NOTES:
+ *
+ * - No iso
+ *
+ * - Supports DMA xfers, control, bulk and maybe interrupt
+ *
+ * - Does not recycle unused rpipes
+ *
+ * An rpipe is assigned to an endpoint the first time it is used,
+ * and then it's there, assigned, until the endpoint is disabled
+ * (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
+ * rpipe to the endpoint is done under the wa->rpipe_mutex mutex.
+ *
+ * It could be done in two ways:
+ *
+ * (a) set up a timer every time an rpipe's use count drops to 1
+ * (which means unused) or when a transfer ends. Reset the
+ * timer when a xfer is queued. If the timer expires, release
+ * the rpipe [see rpipe_ep_disable()].
+ *
+ * (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
+ * when none are found, go over the list, check their endpoint
+ * and their activity record (if there is no last-xfer-done-ts in
+ * the last x seconds), and take one.
+ *
+ * However, because we have a set of limited
+ * resources (max-segments-at-the-same-time per xfer,
+ * xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
+ * we are going to have to rebuild all this on top of a scheduler:
+ * keep a list of pending transactions and, based on the
+ * availability of the different required components (blocks,
+ * rpipes, segment slots, etc), schedule them. Painful.
+ */
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+#include <linux/ratelimit.h>
+#include <linux/export.h>
+#include <linux/scatterlist.h>
+
+#include "wa-hc.h"
+#include "wusbhc.h"
+
+enum {
+ /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
+ WA_SEGS_MAX = 128,
+};
+
+enum wa_seg_status {
+ WA_SEG_NOTREADY,
+ WA_SEG_READY,
+ WA_SEG_DELAYED,
+ WA_SEG_SUBMITTED,
+ WA_SEG_PENDING,
+ WA_SEG_DTI_PENDING,
+ WA_SEG_DONE,
+ WA_SEG_ERROR,
+ WA_SEG_ABORTED,
+};
+
+static void wa_xfer_delayed_run(struct wa_rpipe *);
+static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
+
+/*
+ * Life cycle governed by 'struct urb' (the refcount of the struct is
+ * that of the 'struct urb' and usb_free_urb() would free the whole
+ * struct).
+ */
+struct wa_seg {
+ struct urb tr_urb; /* transfer request urb. */
+ struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
+ struct urb *dto_urb; /* for data output. */
+ struct list_head list_node; /* for rpipe->req_list */
+ struct wa_xfer *xfer; /* out xfer */
+ u8 index; /* which segment we are */
+ int isoc_frame_count; /* number of isoc frames in this segment. */
+ int isoc_frame_offset; /* starting frame offset in the xfer URB. */
+ /* Isoc frame that the current transfer buffer corresponds to. */
+ int isoc_frame_index;
+ int isoc_size; /* size of all isoc frames sent by this seg. */
+ enum wa_seg_status status;
+ ssize_t result; /* bytes xfered or error */
+ struct wa_xfer_hdr xfer_hdr;
+};
+
+static inline void wa_seg_init(struct wa_seg *seg)
+{
+ usb_init_urb(&seg->tr_urb);
+
+ /* set the remaining memory to 0. */
+ memset(((void *)seg) + sizeof(seg->tr_urb), 0,
+ sizeof(*seg) - sizeof(seg->tr_urb));
+}
+
+/*
+ * Protected by xfer->lock
+ *
+ */
+struct wa_xfer {
+ struct kref refcnt;
+ struct list_head list_node;
+ spinlock_t lock;
+ u32 id;
+
+ struct wahc *wa; /* Wire adapter we are plugged to */
+ struct usb_host_endpoint *ep;
+ struct urb *urb; /* URB we are transferring for */
+ struct wa_seg **seg; /* transfer segments */
+ u8 segs, segs_submitted, segs_done;
+ unsigned is_inbound:1;
+ unsigned is_dma:1;
+ size_t seg_size;
+ int result;
+
+ gfp_t gfp; /* allocation mask */
+
+ struct wusb_dev *wusb_dev; /* for activity timestamps */
+};
+
+static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
+ struct wa_seg *seg, int curr_iso_frame);
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+ int starting_index, enum wa_seg_status status);
+
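+/* Initialize the refcount, list node and lock of a new transfer. */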
+static inline void wa_xfer_init(struct wa_xfer *xfer)
+{
+ kref_init(&xfer->refcnt);
+ INIT_LIST_HEAD(&xfer->list_node);
+ spin_lock_init(&xfer->lock);
+}
+
+/*
+ * Destroy a transfer structure
+ *
+ * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
+ * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
+ */
+static void wa_xfer_destroy(struct kref *_xfer)
+{
+ struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
+ if (xfer->seg) {
+ unsigned cnt;
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ struct wa_seg *seg = xfer->seg[cnt];
+ if (seg) {
+ usb_free_urb(seg->isoc_pack_desc_urb);
+ if (seg->dto_urb) {
+ kfree(seg->dto_urb->sg);
+ usb_free_urb(seg->dto_urb);
+ }
+ usb_free_urb(&seg->tr_urb);
+ }
+ }
+ kfree(xfer->seg);
+ }
+ kfree(xfer);
+}
+
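+/* Reference counting for struct wa_xfer; the last put frees it. */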
+static void wa_xfer_get(struct wa_xfer *xfer)
+{
+ kref_get(&xfer->refcnt);
+}
+
+static void wa_xfer_put(struct wa_xfer *xfer)
+{
+ kref_put(&xfer->refcnt, wa_xfer_destroy);
+}
+
+/*
+ * Try to get exclusive access to the DTO endpoint resource. Return true
+ * if successful.
+ */
+static inline int __wa_dto_try_get(struct wahc *wa)
+{
+ return (test_and_set_bit(0, &wa->dto_in_use) == 0);
+}
+
+/* Release the DTO endpoint resource. */
+static inline void __wa_dto_put(struct wahc *wa)
+{
+ clear_bit_unlock(0, &wa->dto_in_use);
+}
+
+/* Service RPIPEs that are waiting on the DTO resource. */
+static void wa_check_for_delayed_rpipes(struct wahc *wa)
+{
+ unsigned long flags;
+ int dto_waiting = 0;
+ struct wa_rpipe *rpipe;
+
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
+ while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
+ rpipe = list_first_entry(&wa->rpipe_delayed_list,
+ struct wa_rpipe, list_node);
+ __wa_xfer_delayed_run(rpipe, &dto_waiting);
+ /* remove this RPIPE from the list if it is not waiting. */
+ if (!dto_waiting) {
+ pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
+ __func__,
+ le16_to_cpu(rpipe->descr.wRPipeIndex));
+ list_del_init(&rpipe->list_node);
+ }
+ }
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+/* add this RPIPE to the end of the delayed RPIPE list. */
+static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wa->rpipe_lock, flags);
+ /* add rpipe to the list if it is not already on it. */
+ if (list_empty(&rpipe->list_node)) {
+ pr_debug("%s: adding RPIPE %d to the delayed list.\n",
+ __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
+ list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
+ }
+ spin_unlock_irqrestore(&wa->rpipe_lock, flags);
+}
+
+/*
+ * xfer is referenced
+ *
+ * xfer->lock has to be unlocked
+ *
+ * We take xfer->lock for setting the result; this is a barrier
+ * against drivers/usb/core/hcd.c:unlink1() being called after we call
+ * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
+ * reference to the transfer.
+ */
+static void wa_xfer_giveback(struct wa_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
+ list_del_init(&xfer->list_node);
+ usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
+ spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
+ /* FIXME: segmentation broken -- kills DWA */
+ wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
+ wa_put(xfer->wa);
+ wa_xfer_put(xfer);
+}
+
+/*
+ * xfer is referenced
+ *
+ * xfer->lock has to be unlocked
+ */
+static void wa_xfer_completion(struct wa_xfer *xfer)
+{
+ if (xfer->wusb_dev)
+ wusb_dev_put(xfer->wusb_dev);
+ rpipe_put(xfer->ep->hcpriv);
+ wa_xfer_giveback(xfer);
+}
+
+/*
+ * Initialize a transfer's ID
+ *
+ * We need to use a sequential number; if we use the pointer or the
+ * hash of the pointer, it can repeat over sequential transfers and
+ * then it will confuse the HWA....wonder why in hell they put a 32
+ * bit handle in there then.
+ */
+static void wa_xfer_id_init(struct wa_xfer *xfer)
+{
+ xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
+}
+
+/* Return the xfer's ID. */
+static inline u32 wa_xfer_id(struct wa_xfer *xfer)
+{
+ return xfer->id;
+}
+
+/* Return the xfer's ID in transport format (little endian). */
+static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
+{
+ return cpu_to_le32(xfer->id);
+}
+
+/*
+ * If transfer is done, wrap it up and return true
+ *
+ * xfer->lock has to be locked
+ */
+static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
+{
+ struct device *dev = &xfer->wa->usb_iface->dev;
+ unsigned result, cnt;
+ struct wa_seg *seg;
+ struct urb *urb = xfer->urb;
+ unsigned found_short = 0;
+
+ result = xfer->segs_done == xfer->segs_submitted;
+ if (result == 0)
+ goto out;
+ urb->actual_length = 0;
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ seg = xfer->seg[cnt];
+ switch (seg->status) {
+ case WA_SEG_DONE:
+ if (found_short && seg->result > 0) {
+ dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
+ xfer, wa_xfer_id(xfer), cnt,
+ seg->result);
+ urb->status = -EINVAL;
+ goto out;
+ }
+ urb->actual_length += seg->result;
+ if (!(usb_pipeisoc(xfer->urb->pipe))
+ && seg->result < xfer->seg_size
+ && cnt != xfer->segs-1)
+ found_short = 1;
+ dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
+ "result %zu urb->actual_length %d\n",
+ xfer, wa_xfer_id(xfer), seg->index, found_short,
+ seg->result, urb->actual_length);
+ break;
+ case WA_SEG_ERROR:
+ xfer->result = seg->result;
+ dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
+ xfer, wa_xfer_id(xfer), seg->index, seg->result,
+ seg->result);
+ goto out;
+ case WA_SEG_ABORTED:
+ xfer->result = seg->result;
+ dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
+ xfer, wa_xfer_id(xfer), seg->index, seg->result,
+ seg->result);
+ goto out;
+ default:
+ dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
+ xfer, wa_xfer_id(xfer), cnt, seg->status);
+ xfer->result = -EINVAL;
+ goto out;
+ }
+ }
+ xfer->result = 0;
+out:
+ return result;
+}
+
+/*
+ * Mark the given segment as done. Return true if this completes the xfer.
+ * This should only be called for segs that have been submitted to an RPIPE.
+ * Delayed segs are not marked as submitted so they do not need to be marked
+ * as done when cleaning up.
+ *
+ * xfer->lock has to be locked
+ */
+static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
+ struct wa_seg *seg, enum wa_seg_status status)
+{
+ seg->status = status;
+ xfer->segs_done++;
+
+ /* check for done. */
+ return __wa_xfer_is_done(xfer);
+}
+
+/*
+ * Search the WA's active transfer list for a transfer ID
+ *
+ * The ID is the sequential number assigned by wa_xfer_id_init(); the
+ * returned transfer, if found, has an extra reference taken.
+ *
+ * @returns NULL if not found.
+ */
+static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
+{
+ unsigned long flags;
+ struct wa_xfer *xfer_itr;
+ spin_lock_irqsave(&wa->xfer_list_lock, flags);
+ list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
+ if (id == xfer_itr->id) {
+ wa_xfer_get(xfer_itr);
+ goto out;
+ }
+ }
+ xfer_itr = NULL;
+out:
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+ return xfer_itr;
+}
+
+struct wa_xfer_abort_buffer {
+ struct urb urb;
+ struct wahc *wa;
+ struct wa_xfer_abort cmd;
+};
+
+static void __wa_xfer_abort_cb(struct urb *urb)
+{
+ struct wa_xfer_abort_buffer *b = urb->context;
+ struct wahc *wa = b->wa;
+
+ /*
+ * If the abort request URB failed, then the HWA did not get the abort
+ * command. Forcibly clean up the xfer without waiting for a Transfer
+ * Result from the HWA.
+ */
+ if (urb->status < 0) {
+ struct wa_xfer *xfer;
+ struct device *dev = &wa->usb_iface->dev;
+
+ xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
+ dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
+ __func__, urb->status);
+ if (xfer) {
+ unsigned long flags;
+ int done, seg_index = 0;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
+ __func__, xfer, wa_xfer_id(xfer));
+ spin_lock_irqsave(&xfer->lock, flags);
+ /* skip done segs. */
+ while (seg_index < xfer->segs) {
+ struct wa_seg *seg = xfer->seg[seg_index];
+
+ if ((seg->status == WA_SEG_DONE) ||
+ (seg->status == WA_SEG_ERROR)) {
+ ++seg_index;
+ } else {
+ break;
+ }
+ }
+ /* mark remaining segs as aborted. */
+ wa_complete_remaining_xfer_segs(xfer, seg_index,
+ WA_SEG_ABORTED);
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ } else {
+ dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
+ __func__, le32_to_cpu(b->cmd.dwTransferID));
+ }
+ }
+
+ wa_put(wa); /* taken in __wa_xfer_abort */
+ usb_put_urb(&b->urb);
+}
+
+/*
+ * Aborts an ongoing transaction
+ *
+ * Assumes the transfer is referenced and locked and in a submitted
+ * state (mainly that there is an endpoint/rpipe assigned).
+ *
+ * The callback (see above) does nothing but free up the data by
+ * putting the URB. Because the URB is allocated at the head of the
+ * struct, the whole space we allocated is kfreed.
+ */
+static int __wa_xfer_abort(struct wa_xfer *xfer)
+{
+ int result = -ENOMEM;
+ struct device *dev = &xfer->wa->usb_iface->dev;
+ struct wa_xfer_abort_buffer *b;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ b = kmalloc(sizeof(*b), GFP_ATOMIC);
+ if (b == NULL)
+ goto error_kmalloc;
+ b->cmd.bLength = sizeof(b->cmd);
+ b->cmd.bRequestType = WA_XFER_ABORT;
+ b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
+ b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
+ b->wa = wa_get(xfer->wa);
+
+ usb_init_urb(&b->urb);
+ usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
+ usb_sndbulkpipe(xfer->wa->usb_dev,
+ xfer->wa->dto_epd->bEndpointAddress),
+ &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
+ result = usb_submit_urb(&b->urb, GFP_ATOMIC);
+ if (result < 0)
+ goto error_submit;
+ return result; /* callback frees! */
+
+
+error_submit:
+ wa_put(xfer->wa);
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
+ xfer, result);
+ kfree(b);
+error_kmalloc:
+ return result;
+
+}
+
+/*
+ * Calculate the number of isoc frames starting from isoc_frame_offset
+ * that will fit in a transfer segment.
+ */
+static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
+ int isoc_frame_offset, int *total_size)
+{
+ int segment_size = 0, frame_count = 0;
+ int index = isoc_frame_offset;
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+
+ while ((index < xfer->urb->number_of_packets)
+ && ((segment_size + iso_frame_desc[index].length)
+ <= xfer->seg_size)) {
+ /*
+ * For Alereon HWA devices, only include an isoc frame in an
+ * out segment if it is physically contiguous with the previous
+ * frame. This is required because those devices expect
+ * the isoc frames to be sent as a single USB transaction as
+ * opposed to one transaction per frame with standard HWA.
+ */
+ if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ && (xfer->is_inbound == 0)
+ && (index > isoc_frame_offset)
+ && ((iso_frame_desc[index - 1].offset +
+ iso_frame_desc[index - 1].length) !=
+ iso_frame_desc[index].offset))
+ break;
+
+ /* this frame fits. count it. */
+ ++frame_count;
+ segment_size += iso_frame_desc[index].length;
+
+ /* move to the next isoc frame. */
+ ++index;
+ }
+
+ *total_size = segment_size;
+ return frame_count;
+}
+
+/*
+ *
+ * @returns < 0 on error, transfer segment request size if ok
+ */
+static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
+ enum wa_xfer_type *pxfer_type)
+{
+ ssize_t result;
+ struct device *dev = &xfer->wa->usb_iface->dev;
+ size_t maxpktsize;
+ struct urb *urb = xfer->urb;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ switch (rpipe->descr.bmAttribute & 0x3) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ *pxfer_type = WA_XFER_TYPE_CTL;
+ result = sizeof(struct wa_xfer_ctl);
+ break;
+ case USB_ENDPOINT_XFER_INT:
+ case USB_ENDPOINT_XFER_BULK:
+ *pxfer_type = WA_XFER_TYPE_BI;
+ result = sizeof(struct wa_xfer_bi);
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ *pxfer_type = WA_XFER_TYPE_ISO;
+ result = sizeof(struct wa_xfer_hwaiso);
+ break;
+ default:
+ /* never happens */
+ BUG();
+ result = -EINVAL; /* shut gcc up */
+ }
+ xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
+ xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
+
+ maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
+ xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
+ * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
+ /* Compute the segment size and make sure it is a multiple of
+ * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
+ * a check (FIXME) */
+ if (xfer->seg_size < maxpktsize) {
+ dev_err(dev,
+ "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
+ xfer->seg_size, maxpktsize);
+ result = -EINVAL;
+ goto error;
+ }
+ xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
+ if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
+ int index = 0;
+
+ xfer->segs = 0;
+ /*
+ * loop over urb->number_of_packets to determine how many
+ * xfer segments will be needed to send the isoc frames.
+ */
+ while (index < urb->number_of_packets) {
+ int seg_size; /* don't care. */
+ index += __wa_seg_calculate_isoc_frame_count(xfer,
+ index, &seg_size);
+ ++xfer->segs;
+ }
+ } else {
+ xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
+ xfer->seg_size);
+ if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
+ xfer->segs = 1;
+ }
+
+ if (xfer->segs > WA_SEGS_MAX) {
+ dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
+ (urb->transfer_buffer_length/xfer->seg_size),
+ WA_SEGS_MAX);
+ result = -EINVAL;
+ goto error;
+ }
+error:
+ return result;
+}
+
+static void __wa_setup_isoc_packet_descr(
+ struct wa_xfer_packet_info_hwaiso *packet_desc,
+ struct wa_xfer *xfer,
+ struct wa_seg *seg) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ int frame_index;
+
+ /* populate isoc packet descriptor. */
+ packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
+ packet_desc->wLength = cpu_to_le16(struct_size(packet_desc,
+ PacketLength,
+ seg->isoc_frame_count));
+ for (frame_index = 0; frame_index < seg->isoc_frame_count;
+ ++frame_index) {
+ int offset_index = frame_index + seg->isoc_frame_offset;
+ packet_desc->PacketLength[frame_index] =
+ cpu_to_le16(iso_frame_desc[offset_index].length);
+ }
+}
+
+
+/* Fill in the common request header and xfer-type specific data. */
+static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
+ struct wa_xfer_hdr *xfer_hdr0,
+ enum wa_xfer_type xfer_type,
+ size_t xfer_hdr_size)
+{
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+ struct wa_seg *seg = xfer->seg[0];
+
+ xfer_hdr0 = &seg->xfer_hdr;
+ xfer_hdr0->bLength = xfer_hdr_size;
+ xfer_hdr0->bRequestType = xfer_type;
+ xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
+ xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
+ xfer_hdr0->bTransferSegment = 0;
+ switch (xfer_type) {
+ case WA_XFER_TYPE_CTL: {
+ struct wa_xfer_ctl *xfer_ctl =
+ container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
+ xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
+ memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
+ sizeof(xfer_ctl->baSetupData));
+ break;
+ }
+ case WA_XFER_TYPE_BI:
+ break;
+ case WA_XFER_TYPE_ISO: {
+ struct wa_xfer_hwaiso *xfer_iso =
+ container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
+ struct wa_xfer_packet_info_hwaiso *packet_desc =
+ ((void *)xfer_iso) + xfer_hdr_size;
+
+ /* populate the isoc section of the transfer request. */
+ xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
+ /* populate isoc packet descriptor. */
+ __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
+ break;
+ }
+ default:
+ BUG();
+	}
+}
+
+/*
+ * Callback for the OUT data phase of the segment request
+ *
+ * Check wa_seg_tr_cb(); most comments also apply here because this
+ * function does almost the same thing and they work closely
+ * together.
+ *
+ * If the seg request has failed but this DTO phase has succeeded,
+ * wa_seg_tr_cb() has already failed the segment and moved the
+ * status to WA_SEG_ERROR, so this will go through 'case 0' and
+ * effectively do nothing.
+ */
+static void wa_seg_dto_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned long flags;
+ unsigned rpipe_ready = 0;
+ int data_send_done = 1, release_dto = 0, holding_dto = 0;
+ u8 done = 0;
+ int result;
+
+ /* free the sg if it was used. */
+ kfree(urb->sg);
+ urb->sg = NULL;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ /* Alereon HWA sends all isoc frames in a single transfer. */
+ if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ seg->isoc_frame_index += seg->isoc_frame_count;
+ else
+ seg->isoc_frame_index += 1;
+ if (seg->isoc_frame_index < seg->isoc_frame_count) {
+ data_send_done = 0;
+ holding_dto = 1; /* checked in error cases. */
+ /*
+ * if this is the last isoc frame of the segment, we
+ * can release DTO after sending this frame.
+ */
+ if ((seg->isoc_frame_index + 1) >=
+ seg->isoc_frame_count)
+ release_dto = 1;
+ }
+ dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
+ wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
+ holding_dto, release_dto);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+ seg->result += urb->actual_length;
+ if (data_send_done) {
+ dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
+ wa_xfer_id(xfer), seg->index, seg->result);
+ if (seg->status < WA_SEG_PENDING)
+ seg->status = WA_SEG_PENDING;
+ } else {
+ /* should only hit this for isoc xfers. */
+ /*
+ * Populate the dto URB with the next isoc frame buffer,
+ * send the URB and release DTO if we no longer need it.
+ */
+ __wa_populate_dto_urb_isoc(xfer, seg,
+ seg->isoc_frame_offset + seg->isoc_frame_index);
+
+ /* resubmit the URB with the next isoc frame. */
+ /* take a ref on resubmit. */
+ wa_xfer_get(xfer);
+ result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
+ wa_xfer_id(xfer), seg->index, result);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ goto error_dto_submit;
+ }
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (release_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+	case -ENOENT:		/* as it was done by whoever unlinked us */
+ if (holding_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ break;
+ default: /* Other errors ... */
+ dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
+ wa_xfer_id(xfer), seg->index, urb->status);
+ goto error_default;
+ }
+
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
+ return;
+
+error_dto_submit:
+ /* taken on resubmit attempt. */
+ wa_xfer_put(xfer);
+error_default:
+ spin_lock_irqsave(&xfer->lock, flags);
+ rpipe = xfer->ep->hcpriv;
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+			EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ if (seg->status != WA_SEG_ERROR) {
+ seg->result = urb->status;
+ __wa_xfer_abort(xfer);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (holding_dto) {
+ __wa_dto_put(wa);
+ wa_check_for_delayed_rpipes(wa);
+ }
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
+}
+
+/*
+ * Callback for the isoc packet descriptor phase of the segment request
+ *
+ * Check wa_seg_tr_cb(); most comments also apply here because this
+ * function does almost the same thing and they work closely
+ * together.
+ *
+ * If the seg request has failed but this phase has succeeded,
+ * wa_seg_tr_cb() has already failed the segment and moved the
+ * status to WA_SEG_ERROR, so this will go through 'case 0' and
+ * effectively do nothing.
+ */
+static void wa_seg_iso_pack_desc_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned long flags;
+ unsigned rpipe_ready = 0;
+ u8 done = 0;
+
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
+ wa_xfer_id(xfer), seg->index);
+ if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
+ seg->status = WA_SEG_PENDING;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+	case -ENOENT:		/* as it was done by whoever unlinked us */
+ break;
+ default: /* Other errors ... */
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ rpipe = xfer->ep->hcpriv;
+ pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
+ wa_xfer_id(xfer), seg->index, urb->status);
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+				EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ if (seg->status != WA_SEG_ERROR) {
+ usb_unlink_urb(seg->dto_urb);
+ seg->result = urb->status;
+ __wa_xfer_abort(xfer);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
+}
+
+/*
+ * Callback for the segment request
+ *
+ * If successful, transition state (unless already transitioned or an
+ * outbound transfer); otherwise, take note of the error, mark this
+ * segment done and try completion.
+ *
+ * Note we don't access until we are sure that the transfer hasn't
+ * been cancelled (ECONNRESET, ENOENT), which could mean that
+ * seg->xfer could be already gone.
+ *
+ * We have to check before setting the status to WA_SEG_PENDING
+ * because sometimes the xfer result callback arrives before this
+ * callback (geeeeeeze), so it might happen that we are already in
+ * another state. As well, we don't set it if the transfer is not inbound,
+ * as in that case, wa_seg_dto_cb will do it when the OUT data phase
+ * finishes.
+ */
+static void wa_seg_tr_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned long flags;
+ unsigned rpipe_ready;
+ u8 done = 0;
+
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
+ xfer, wa_xfer_id(xfer), seg->index);
+ if (xfer->is_inbound &&
+ seg->status < WA_SEG_PENDING &&
+ !(usb_pipeisoc(xfer->urb->pipe)))
+ seg->status = WA_SEG_PENDING;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+	case -ENOENT:		/* as it was done by whoever unlinked us */
+ break;
+ default: /* Other errors ... */
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ rpipe = xfer->ep->hcpriv;
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+				EDC_ERROR_TIMEFRAME)) {
+			dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ usb_unlink_urb(seg->isoc_pack_desc_urb);
+ usb_unlink_urb(seg->dto_urb);
+ seg->result = urb->status;
+ __wa_xfer_abort(xfer);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+ /* taken when this URB was submitted. */
+ wa_xfer_put(xfer);
+}
+
+/*
+ * Allocate an SG list to store bytes_to_transfer bytes and copy the
+ * subset of the in_sg that matches the buffer subset
+ * we are about to transfer.
+ */
+static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
+ const unsigned int bytes_transferred,
+ const unsigned int bytes_to_transfer, int *out_num_sgs)
+{
+ struct scatterlist *out_sg;
+ unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
+ nents;
+ struct scatterlist *current_xfer_sg = in_sg;
+ struct scatterlist *current_seg_sg, *last_seg_sg;
+
+ /* skip previously transferred pages. */
+ while ((current_xfer_sg) &&
+ (bytes_processed < bytes_transferred)) {
+ bytes_processed += current_xfer_sg->length;
+
+ /* advance the sg if current segment starts on or past the
+ next page. */
+ if (bytes_processed <= bytes_transferred)
+ current_xfer_sg = sg_next(current_xfer_sg);
+ }
+
+ /* the data for the current segment starts in current_xfer_sg.
+ calculate the offset. */
+ if (bytes_processed > bytes_transferred) {
+ offset_into_current_page_data = current_xfer_sg->length -
+ (bytes_processed - bytes_transferred);
+ }
+
+ /* calculate the number of pages needed by this segment. */
+ nents = DIV_ROUND_UP((bytes_to_transfer +
+ offset_into_current_page_data +
+ current_xfer_sg->offset),
+ PAGE_SIZE);
+
+ out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
+ if (out_sg) {
+ sg_init_table(out_sg, nents);
+
+ /* copy the portion of the incoming SG that correlates to the
+ * data to be transferred by this segment to the segment SG. */
+ last_seg_sg = current_seg_sg = out_sg;
+ bytes_processed = 0;
+
+ /* reset nents and calculate the actual number of sg entries
+ needed. */
+ nents = 0;
+ while ((bytes_processed < bytes_to_transfer) &&
+ current_seg_sg && current_xfer_sg) {
+ unsigned int page_len = min((current_xfer_sg->length -
+ offset_into_current_page_data),
+ (bytes_to_transfer - bytes_processed));
+
+ sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
+ page_len,
+ current_xfer_sg->offset +
+ offset_into_current_page_data);
+
+ bytes_processed += page_len;
+
+ last_seg_sg = current_seg_sg;
+ current_seg_sg = sg_next(current_seg_sg);
+ current_xfer_sg = sg_next(current_xfer_sg);
+
+ /* only the first page may require additional offset. */
+ offset_into_current_page_data = 0;
+ nents++;
+ }
+
+ /* update num_sgs and terminate the list since we may have
+ * concatenated pages. */
+ sg_mark_end(last_seg_sg);
+ *out_num_sgs = nents;
+ }
+
+ return out_sg;
+}
+
+/*
+ * Populate DMA buffer info for the isoc dto urb.
+ */
+static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
+ struct wa_seg *seg, int curr_iso_frame)
+{
+ seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ /* dto urb buffer address pulled from iso_frame_desc. */
+ seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
+ xfer->urb->iso_frame_desc[curr_iso_frame].offset;
+ /* The Alereon HWA sends a single URB with all isoc segs. */
+ if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+ seg->dto_urb->transfer_buffer_length = seg->isoc_size;
+ else
+ seg->dto_urb->transfer_buffer_length =
+ xfer->urb->iso_frame_desc[curr_iso_frame].length;
+}
+
+/*
+ * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
+ */
+static int __wa_populate_dto_urb(struct wa_xfer *xfer,
+ struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
+{
+ int result = 0;
+
+ if (xfer->is_dma) {
+ seg->dto_urb->transfer_dma =
+ xfer->urb->transfer_dma + buf_itr_offset;
+ seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ } else {
+ /* do buffer or SG processing. */
+ seg->dto_urb->transfer_flags &=
+ ~URB_NO_TRANSFER_DMA_MAP;
+ /* this should always be 0 before a resubmit. */
+ seg->dto_urb->num_mapped_sgs = 0;
+
+ if (xfer->urb->transfer_buffer) {
+ seg->dto_urb->transfer_buffer =
+ xfer->urb->transfer_buffer +
+ buf_itr_offset;
+ seg->dto_urb->sg = NULL;
+ seg->dto_urb->num_sgs = 0;
+ } else {
+ seg->dto_urb->transfer_buffer = NULL;
+
+ /*
+ * allocate an SG list to store seg_size bytes
+ * and copy the subset of the xfer->urb->sg that
+ * matches the buffer subset we are about to
+ * read.
+ */
+ seg->dto_urb->sg = wa_xfer_create_subset_sg(
+ xfer->urb->sg,
+ buf_itr_offset, buf_itr_size,
+ &(seg->dto_urb->num_sgs));
+ if (!(seg->dto_urb->sg))
+ result = -ENOMEM;
+ }
+ }
+ seg->dto_urb->transfer_buffer_length = buf_itr_size;
+
+ return result;
+}
+
+/*
+ * Allocate the segs array and initialize each of them
+ *
+ * The segments are freed by wa_xfer_destroy() when the xfer use count
+ * drops to zero; however, because each segment is given the same life
+ * cycle as the USB URB it contains, it is actually freed by
+ * usb_put_urb() on the contained USB URB (twisted, eh?).
+ */
+static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
+{
+ int result, cnt, isoc_frame_offset = 0;
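+	/* a wa_seg whose trailing xfer_hdr is sized for this transfer type. */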
+ size_t alloc_size = sizeof(*xfer->seg[0])
+ - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
+ struct usb_device *usb_dev = xfer->wa->usb_dev;
+ const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
+ struct wa_seg *seg;
+ size_t buf_itr, buf_size, buf_itr_size;
+
+ result = -ENOMEM;
+ xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
+ if (xfer->seg == NULL)
+ goto error_segs_kzalloc;
+ buf_itr = 0;
+ buf_size = xfer->urb->transfer_buffer_length;
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ size_t iso_pkt_descr_size = 0;
+ int seg_isoc_frame_count = 0, seg_isoc_size = 0;
+
+ /*
+ * Adjust the size of the segment object to contain space for
+ * the isoc packet descriptor buffer.
+ */
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ seg_isoc_frame_count =
+ __wa_seg_calculate_isoc_frame_count(xfer,
+ isoc_frame_offset, &seg_isoc_size);
+
+ iso_pkt_descr_size =
+ sizeof(struct wa_xfer_packet_info_hwaiso) +
+ (seg_isoc_frame_count * sizeof(__le16));
+ }
+ result = -ENOMEM;
+ seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
+ GFP_ATOMIC);
+ if (seg == NULL)
+ goto error_seg_kmalloc;
+ wa_seg_init(seg);
+ seg->xfer = xfer;
+ seg->index = cnt;
+ usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ &seg->xfer_hdr, xfer_hdr_size,
+ wa_seg_tr_cb, seg);
+ buf_itr_size = min(buf_size, xfer->seg_size);
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ seg->isoc_frame_count = seg_isoc_frame_count;
+ seg->isoc_frame_offset = isoc_frame_offset;
+ seg->isoc_size = seg_isoc_size;
+ /* iso packet descriptor. */
+ seg->isoc_pack_desc_urb =
+ usb_alloc_urb(0, GFP_ATOMIC);
+ if (seg->isoc_pack_desc_urb == NULL)
+ goto error_iso_pack_desc_alloc;
+ /*
+ * The buffer for the isoc packet descriptor starts
+ * after the transfer request header in the
+ * segment object memory buffer.
+ */
+ usb_fill_bulk_urb(
+ seg->isoc_pack_desc_urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ (void *)(&seg->xfer_hdr) +
+ xfer_hdr_size,
+ iso_pkt_descr_size,
+ wa_seg_iso_pack_desc_cb, seg);
+
+ /* adjust starting frame offset for next seg. */
+ isoc_frame_offset += seg_isoc_frame_count;
+ }
+
+ if (xfer->is_inbound == 0 && buf_size > 0) {
+ /* outbound data. */
+ seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (seg->dto_urb == NULL)
+ goto error_dto_alloc;
+ usb_fill_bulk_urb(
+ seg->dto_urb, usb_dev,
+ usb_sndbulkpipe(usb_dev,
+ dto_epd->bEndpointAddress),
+ NULL, 0, wa_seg_dto_cb, seg);
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ /*
+ * Fill in the xfer buffer information for the
+ * first isoc frame. Subsequent frames in this
+ * segment will be filled in and sent from the
+ * DTO completion routine, if needed.
+ */
+ __wa_populate_dto_urb_isoc(xfer, seg,
+ seg->isoc_frame_offset);
+ } else {
+ /* fill in the xfer buffer information. */
+ result = __wa_populate_dto_urb(xfer, seg,
+ buf_itr, buf_itr_size);
+ if (result < 0)
+ goto error_seg_outbound_populate;
+
+ buf_itr += buf_itr_size;
+ buf_size -= buf_itr_size;
+ }
+ }
+ seg->status = WA_SEG_READY;
+ }
+ return 0;
+
+ /*
+ * Free the memory for the current segment which failed to init.
+	 * Use the fact that cnt is left at where it failed. The remaining
+ * segments will be cleaned up by wa_xfer_destroy.
+ */
+error_seg_outbound_populate:
+ usb_free_urb(xfer->seg[cnt]->dto_urb);
+error_dto_alloc:
+ usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
+error_iso_pack_desc_alloc:
+ kfree(xfer->seg[cnt]);
+ xfer->seg[cnt] = NULL;
+error_seg_kmalloc:
+error_segs_kzalloc:
+ return result;
+}
+
+/*
+ * Allocates all the stuff needed to submit a transfer
+ *
+ * Breaks the whole data buffer in a list of segments, each one has a
+ * structure allocated to it and linked in xfer->seg[index]
+ *
+ * FIXME: merge setup_segs() and the last part of this function, no
+ * need to do two for loops when we could run everything in a
+ * single one
+ */
+static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
+{
+ int result;
+ struct device *dev = &xfer->wa->usb_iface->dev;
+ enum wa_xfer_type xfer_type = 0; /* shut up GCC */
+ size_t xfer_hdr_size, cnt, transfer_size;
+ struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
+
+ result = __wa_xfer_setup_sizes(xfer, &xfer_type);
+ if (result < 0)
+ goto error_setup_sizes;
+ xfer_hdr_size = result;
+ result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
+ if (result < 0) {
+ dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
+ xfer, xfer->segs, result);
+ goto error_setup_segs;
+ }
+ /* Fill the first header */
+ xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
+ wa_xfer_id_init(xfer);
+ __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
+
+ /* Fill remaining headers */
+ xfer_hdr = xfer_hdr0;
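+	/* xfer_hdr ends up pointing at the last header filled; it gets the
+	 * last-segment bit set at the end of this function. */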
+ if (xfer_type == WA_XFER_TYPE_ISO) {
+ xfer_hdr0->dwTransferLength =
+ cpu_to_le32(xfer->seg[0]->isoc_size);
+ for (cnt = 1; cnt < xfer->segs; cnt++) {
+ struct wa_xfer_packet_info_hwaiso *packet_desc;
+ struct wa_seg *seg = xfer->seg[cnt];
+ struct wa_xfer_hwaiso *xfer_iso;
+
+ xfer_hdr = &seg->xfer_hdr;
+ xfer_iso = container_of(xfer_hdr,
+ struct wa_xfer_hwaiso, hdr);
+ packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
+ /*
+ * Copy values from the 0th header. Segment specific
+ * values are set below.
+ */
+ memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
+ xfer_hdr->bTransferSegment = cnt;
+ xfer_hdr->dwTransferLength =
+ cpu_to_le32(seg->isoc_size);
+ xfer_iso->dwNumOfPackets =
+ cpu_to_le32(seg->isoc_frame_count);
+ __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
+ seg->status = WA_SEG_READY;
+ }
+ } else {
+ transfer_size = urb->transfer_buffer_length;
+ xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
+ cpu_to_le32(xfer->seg_size) :
+ cpu_to_le32(transfer_size);
+ transfer_size -= xfer->seg_size;
+ for (cnt = 1; cnt < xfer->segs; cnt++) {
+ xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
+ memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
+ xfer_hdr->bTransferSegment = cnt;
+ xfer_hdr->dwTransferLength =
+ transfer_size > xfer->seg_size ?
+ cpu_to_le32(xfer->seg_size)
+ : cpu_to_le32(transfer_size);
+ xfer->seg[cnt]->status = WA_SEG_READY;
+ transfer_size -= xfer->seg_size;
+ }
+ }
+ xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */
+ result = 0;
+error_setup_segs:
+error_setup_sizes:
+ return result;
+}
+
+/*
+ * Submit the URBs that make up a transfer segment: the transfer
+ * request, the isoc packet descriptor (if any) and the OUT data (if any).
+ *
+ * rpipe->seg_lock is held!
+ */
+static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
+ struct wa_seg *seg, int *dto_done)
+{
+ int result;
+
+ /* default to done unless we encounter a multi-frame isoc segment. */
+ *dto_done = 1;
+
+ /*
+ * Take a ref for each segment urb so the xfer cannot disappear until
+ * all of the callbacks run.
+ */
+ wa_xfer_get(xfer);
+ /* submit the transfer request. */
+ seg->status = WA_SEG_SUBMITTED;
+ result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
+ if (result < 0) {
+ pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
+ __func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
+ goto error_tr_submit;
+ }
+ /* submit the isoc packet descriptor if present. */
+ if (seg->isoc_pack_desc_urb) {
+ wa_xfer_get(xfer);
+ result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
+ seg->isoc_frame_index = 0;
+ if (result < 0) {
+ pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
+ __func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
+ goto error_iso_pack_desc_submit;
+ }
+ }
+ /* submit the out data if this is an out request. */
+ if (seg->dto_urb) {
+ struct wahc *wa = xfer->wa;
+ wa_xfer_get(xfer);
+ result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
+ if (result < 0) {
+ pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
+ __func__, xfer, seg->index, result);
+ wa_xfer_put(xfer);
+ goto error_dto_submit;
+ }
+ /*
+ * If this segment contains more than one isoc frame, hold
+ * onto the dto resource until we send all frames.
+ * Only applies to non-Alereon devices.
+ */
+ if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
+ && (seg->isoc_frame_count > 1))
+ *dto_done = 0;
+ }
+ rpipe_avail_dec(rpipe);
+ return 0;
+
+error_dto_submit:
+ usb_unlink_urb(seg->isoc_pack_desc_urb);
+error_iso_pack_desc_submit:
+ usb_unlink_urb(&seg->tr_urb);
+error_tr_submit:
+ seg->status = WA_SEG_ERROR;
+ seg->result = result;
+ *dto_done = 1;
+ return result;
+}
+
+/*
+ * Execute more queued request segments until the maximum concurrently
+ * allowed is reached.  Return true if the DTO resource was acquired and
+ * released.
+ *
+ * The ugly unlock/lock sequence on the error path is needed as the
+ * xfer->lock normally nests the seg_lock and not vice versa.
+ */
+static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
+{
+ int result, dto_acquired = 0, dto_done = 0;
+ struct device *dev = &rpipe->wa->usb_iface->dev;
+ struct wa_seg *seg;
+ struct wa_xfer *xfer;
+ unsigned long flags;
+
+ *dto_waiting = 0;
+
+ spin_lock_irqsave(&rpipe->seg_lock, flags);
+ while (atomic_read(&rpipe->segs_available) > 0
+ && !list_empty(&rpipe->seg_list)
+ && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
+ seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
+ list_node);
+ list_del(&seg->list_node);
+ xfer = seg->xfer;
+ /*
+ * Get a reference to the xfer in case the callbacks for the
+ * URBs submitted by __wa_seg_submit attempt to complete
+ * the xfer before this function completes.
+ */
+ wa_xfer_get(xfer);
+ result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
+ /* release the dto resource if this RPIPE is done with it. */
+ if (dto_done)
+ __wa_dto_put(rpipe->wa);
+ dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ atomic_read(&rpipe->segs_available), result);
+ if (unlikely(result < 0)) {
+ int done;
+
+ spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+ spin_lock_irqsave(&xfer->lock, flags);
+ __wa_xfer_abort(xfer);
+ /*
+ * This seg was marked as submitted when it was put on
+ * the RPIPE seg_list. Mark it done.
+ */
+ xfer->segs_done++;
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ spin_lock_irqsave(&rpipe->seg_lock, flags);
+ }
+ wa_xfer_put(xfer);
+ }
+ /*
+ * Mark this RPIPE as waiting if dto was not acquired, there are
+ * delayed segs and no active transfers to wake us up later.
+ */
+ if (!dto_acquired && !list_empty(&rpipe->seg_list)
+ && (atomic_read(&rpipe->segs_available) ==
+ le16_to_cpu(rpipe->descr.wRequests)))
+ *dto_waiting = 1;
+
+ spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+
+ return dto_done;
+}
+
+static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
+{
+ int dto_waiting;
+ int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);
+
+ /*
+ * If this RPIPE is waiting on the DTO resource, add it to the tail of
+ * the waiting list.
+ * Otherwise, if the WA DTO resource was acquired and released by
+ * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
+ * DTO and failed during that time. Check the delayed list and process
+ * any waiters. Start searching from the next RPIPE index.
+ */
+ if (dto_waiting)
+ wa_add_delayed_rpipe(rpipe->wa, rpipe);
+ else if (dto_done)
+ wa_check_for_delayed_rpipes(rpipe->wa);
+}
+
+/*
+ * Submit all of a transfer's segments to its RPIPE, delaying the ones
+ * that do not fit right away.
+ *
+ * xfer->lock is taken
+ *
+ * On a submit failure we just stop submitting and return the error;
+ * wa_urb_enqueue_b() will execute the completion path.
+ */
+static int __wa_xfer_submit(struct wa_xfer *xfer)
+{
+ int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
+ struct wahc *wa = xfer->wa;
+ struct device *dev = &wa->usb_iface->dev;
+ unsigned cnt;
+ struct wa_seg *seg;
+ unsigned long flags;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+ size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
+ u8 available;
+ u8 empty;
+
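+	/* add this xfer to the wire adapter's list of active transfers. */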
+ spin_lock_irqsave(&wa->xfer_list_lock, flags);
+ list_add_tail(&xfer->list_node, &wa->xfer_list);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+
+ BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
+ result = 0;
+ spin_lock_irqsave(&rpipe->seg_lock, flags);
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ int delay_seg = 1;
+
+ available = atomic_read(&rpipe->segs_available);
+ empty = list_empty(&rpipe->seg_list);
+ seg = xfer->seg[cnt];
+ if (available && empty) {
+ /*
+ * Only attempt to acquire DTO if we have a segment
+ * to send.
+ */
+ dto_acquired = __wa_dto_try_get(rpipe->wa);
+ if (dto_acquired) {
+ delay_seg = 0;
+ result = __wa_seg_submit(rpipe, xfer, seg,
+ &dto_done);
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
+ xfer, wa_xfer_id(xfer), cnt, available,
+ empty);
+ if (dto_done)
+ __wa_dto_put(rpipe->wa);
+
+ if (result < 0) {
+ __wa_xfer_abort(xfer);
+ goto error_seg_submit;
+ }
+ }
+ }
+
+ if (delay_seg) {
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
+ xfer, wa_xfer_id(xfer), cnt, available, empty);
+ seg->status = WA_SEG_DELAYED;
+ list_add_tail(&seg->list_node, &rpipe->seg_list);
+ }
+ xfer->segs_submitted++;
+ }
+error_seg_submit:
+ /*
+ * Mark this RPIPE as waiting if dto was not acquired, there are
+ * delayed segs and no active transfers to wake us up later.
+ */
+ if (!dto_acquired && !list_empty(&rpipe->seg_list)
+ && (atomic_read(&rpipe->segs_available) ==
+ le16_to_cpu(rpipe->descr.wRequests)))
+ dto_waiting = 1;
+ spin_unlock_irqrestore(&rpipe->seg_lock, flags);
+
+ if (dto_waiting)
+ wa_add_delayed_rpipe(rpipe->wa, rpipe);
+ else if (dto_done)
+ wa_check_for_delayed_rpipes(rpipe->wa);
+
+ return result;
+}
+
+/*
+ * Second part of a URB/transfer enqueue operation
+ *
+ * Assumes this comes from wa_urb_enqueue() [maybe through
+ * wa_urb_enqueue_run()]. At this point:
+ *
+ * xfer->wa filled and refcounted
+ * xfer->ep filled with rpipe refcounted if
+ * delayed == 0
+ * xfer->urb filled and refcounted (this is the case when called
+ * from wa_urb_enqueue() as we come from usb_submit_urb()
+ * and when called by wa_urb_enqueue_run(), as we took an
+ * extra ref dropped by _run() after we return).
+ * xfer->gfp filled
+ *
+ * If we fail at __wa_xfer_submit(), then we just check if we are done
+ * and if so, we run the completion procedure. However, if we are not
+ * yet done, we do nothing and wait for the completion handlers from
+ * the submitted URBs or from the xfer-result path to kick in. If xfer
+ * result never kicks in, the xfer will timeout from the USB code and
+ * dequeue() will be called.
+ */
+static int wa_urb_enqueue_b(struct wa_xfer *xfer)
+{
+ int result;
+ unsigned long flags;
+ struct urb *urb = xfer->urb;
+ struct wahc *wa = xfer->wa;
+ struct wusbhc *wusbhc = wa->wusb;
+ struct wusb_dev *wusb_dev;
+ unsigned done;
+
+ result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
+ if (result < 0) {
+ pr_err("%s: error_rpipe_get\n", __func__);
+ goto error_rpipe_get;
+ }
+ result = -ENODEV;
+ /* FIXME: segmentation broken -- kills DWA */
+ mutex_lock(&wusbhc->mutex); /* get a WUSB dev */
+ if (urb->dev == NULL) {
+ mutex_unlock(&wusbhc->mutex);
+ pr_err("%s: error usb dev gone\n", __func__);
+ goto error_dev_gone;
+ }
+ wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
+ if (wusb_dev == NULL) {
+ mutex_unlock(&wusbhc->mutex);
+ dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
+ __func__);
+ goto error_dev_gone;
+ }
+ mutex_unlock(&wusbhc->mutex);
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ xfer->wusb_dev = wusb_dev;
+ result = urb->status;
+ if (urb->status != -EINPROGRESS) {
+ dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
+ goto error_dequeued;
+ }
+
+ result = __wa_xfer_setup(xfer, urb);
+ if (result < 0) {
+ dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
+ goto error_xfer_setup;
+ }
+ /*
+ * Get a xfer reference since __wa_xfer_submit starts asynchronous
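+	/* xfer_hdr0 is (re)pointed at the first segment's embedded header. */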
+ * operations that may try to complete the xfer before this function
+ * exits.
+ */
+ wa_xfer_get(xfer);
+ result = __wa_xfer_submit(xfer);
+ if (result < 0) {
+ dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
+ goto error_xfer_submit;
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_put(xfer);
+ return 0;
+
+ /*
+ * this is basically wa_xfer_completion() broken up wa_xfer_giveback()
+ * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
+ * setup().
+ */
+error_xfer_setup:
+error_dequeued:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ /* FIXME: segmentation broken, kills DWA */
+ if (wusb_dev)
+ wusb_dev_put(wusb_dev);
+error_dev_gone:
+ rpipe_put(xfer->ep->hcpriv);
+error_rpipe_get:
+ xfer->result = result;
+ return result;
+
+error_xfer_submit:
+ done = __wa_xfer_is_done(xfer);
+ xfer->result = result;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ wa_xfer_put(xfer);
+ /* return success since the completion routine will run. */
+ return 0;
+}
+
+/*
+ * Execute the delayed transfers in the Wire Adapter @wa
+ *
+ * We need to be careful here, as dequeue() could be called in the
+ * middle. That's why we do the whole thing under the
+ * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
+ * and then checks the list -- so as we would be acquiring in inverse
+ * order, we move the delayed list to a separate list while locked and then
+ * submit them without the list lock held.
+ */
+void wa_urb_enqueue_run(struct work_struct *ws)
+{
+ struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
+ struct wa_xfer *xfer, *next;
+ struct urb *urb;
+ LIST_HEAD(tmp_list);
+
+ /* Create a copy of the wa->xfer_delayed_list while holding the lock */
+ spin_lock_irq(&wa->xfer_list_lock);
+ list_cut_position(&tmp_list, &wa->xfer_delayed_list,
+ wa->xfer_delayed_list.prev);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ /*
+ * enqueue from temp list without list lock held since wa_urb_enqueue_b
+ * can take xfer->lock as well as lock mutexes.
+ */
+ list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+ list_del_init(&xfer->list_node);
+
+ urb = xfer->urb;
+ if (wa_urb_enqueue_b(xfer) < 0)
+ wa_xfer_giveback(xfer);
+ usb_put_urb(urb); /* taken when queuing */
+ }
+}
+EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
+
+/*
+ * Process the errored transfers on the Wire Adapter outside of interrupt.
+ */
+void wa_process_errored_transfers_run(struct work_struct *ws)
+{
+ struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
+ struct wa_xfer *xfer, *next;
+ LIST_HEAD(tmp_list);
+
+ pr_info("%s: Run delayed STALL processing.\n", __func__);
+
+ /* Create a copy of the wa->xfer_errored_list while holding the lock */
+ spin_lock_irq(&wa->xfer_list_lock);
+ list_cut_position(&tmp_list, &wa->xfer_errored_list,
+ wa->xfer_errored_list.prev);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ /*
+ * run rpipe_clear_feature_stalled from temp list without list lock
+ * held.
+ */
+ list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+ struct wa_rpipe *rpipe;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ ep = xfer->ep;
+ rpipe = ep->hcpriv;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* clear RPIPE feature stalled without holding a lock. */
+ rpipe_clear_feature_stalled(wa, ep);
+
+ /* complete the xfer. This removes it from the tmp list. */
+ wa_xfer_completion(xfer);
+
+ /* check for work. */
+ wa_xfer_delayed_run(rpipe);
+ }
+}
+EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
+
+/*
+ * Submit a transfer to the Wire Adapter in a delayed way
+ *
+ * The process of enqueuing involves possible sleeps() [see
+ * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
+ * in an atomic section, we defer the enqueue_b() call; otherwise we
+ * call it directly.
+ *
+ * @urb: We own a reference to it done by the HCI Linux USB stack that
+ * will be given up by calling usb_hcd_giveback_urb() or by
+ * returning error from this function -> ergo we don't have to
+ * refcount it.
+ */
+int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
+ struct urb *urb, gfp_t gfp)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_xfer *xfer;
+ unsigned long my_flags;
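+	/* if we cannot sleep here, defer the enqueue to the workqueue below. */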
+ unsigned cant_sleep = irqs_disabled() | in_atomic();
+
+ if ((urb->transfer_buffer == NULL)
+ && (urb->sg == NULL)
+ && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
+ && urb->transfer_buffer_length != 0) {
+ dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
+ dump_stack();
+ }
+
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+ if (result < 0)
+ goto error_link_urb;
+
+ result = -ENOMEM;
+ xfer = kzalloc(sizeof(*xfer), gfp);
+ if (xfer == NULL)
+ goto error_kmalloc;
+
+ result = -ENOENT;
+ if (urb->status != -EINPROGRESS) /* cancelled */
+ goto error_dequeued; /* before starting? */
+ wa_xfer_init(xfer);
+ xfer->wa = wa_get(wa);
+ xfer->urb = urb;
+ xfer->gfp = gfp;
+ xfer->ep = ep;
+ urb->hcpriv = xfer;
+
+ dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
+ xfer, urb, urb->pipe, urb->transfer_buffer_length,
+ urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
+ urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
+ cant_sleep ? "deferred" : "inline");
+
+ if (cant_sleep) {
+ usb_get_urb(urb);
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+ queue_work(wusbd, &wa->xfer_enqueue_work);
+ } else {
+ result = wa_urb_enqueue_b(xfer);
+ if (result < 0) {
+ /*
+ * URB submit/enqueue failed. Clean up, return an
+ * error and do not run the callback. This avoids
+ * an infinite submit/complete loop.
+ */
+ dev_err(dev, "%s: URB enqueue failed: %d\n",
+ __func__, result);
+ wa_put(xfer->wa);
+ wa_xfer_put(xfer);
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+ return result;
+ }
+ }
+ return 0;
+
+error_dequeued:
+ kfree(xfer);
+error_kmalloc:
+ spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
+ usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
+ spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
+error_link_urb:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_urb_enqueue);
+
+/*
+ * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
+ * handler] is called.
+ *
+ * Until a transfer goes successfully through wa_urb_enqueue(), it
+ * needs to be dequeued with the completion called by hand; when it is
+ * stuck in the delayed list or wa_xfer_setup() has not run yet, we
+ * need to do the completion ourselves.
+ *
+ * not setup: if there is no hcpriv yet, that means that the enqueue
+ *            still had no time to set the xfer up. Because
+ *            urb->status should be other than -EINPROGRESS,
+ *            enqueue() will catch that and bail out.
+ *
+ * If the transfer has gone through setup, we just need to clean it
+ * up. If it has gone through submit(), we have to abort it [with an
+ * asynch request] and then make sure we cancel each segment.
+ *
+ */
+int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
+{
+ unsigned long flags;
+ struct wa_xfer *xfer;
+ struct wa_seg *seg;
+ struct wa_rpipe *rpipe;
+ unsigned cnt, done = 0, xfer_abort_pending;
+ unsigned rpipe_ready = 0;
+ int result;
+
+ /* check if it is safe to unlink. */
+ spin_lock_irqsave(&wa->xfer_list_lock, flags);
+ result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
+ if ((result == 0) && urb->hcpriv) {
+ /*
+ * Get a xfer ref to prevent a race with wa_xfer_giveback
+ * cleaning up the xfer while we are working with it.
+ */
+ wa_xfer_get(urb->hcpriv);
+ }
+ spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
+ if (result)
+ return result;
+
+ xfer = urb->hcpriv;
+ if (xfer == NULL)
+ return -ENOENT;
+ spin_lock_irqsave(&xfer->lock, flags);
+ pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
+ rpipe = xfer->ep->hcpriv;
+ if (rpipe == NULL) {
+		pr_debug("%s: xfer %p id 0x%08X has no RPIPE. Probably already aborted.\n",
+			__func__, xfer, wa_xfer_id(xfer));
+ result = -ENOENT;
+ goto out_unlock;
+ }
+ /*
+ * Check for done to avoid racing with wa_xfer_giveback and completing
+ * twice.
+ */
+ if (__wa_xfer_is_done(xfer)) {
+ pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
+ xfer, wa_xfer_id(xfer));
+ result = -ENOENT;
+ goto out_unlock;
+ }
+ /* Check the delayed list -> if there, release and complete */
+ spin_lock(&wa->xfer_list_lock);
+ if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
+ goto dequeue_delayed;
+ spin_unlock(&wa->xfer_list_lock);
+ if (xfer->seg == NULL) /* still hasn't reached */
+ goto out_unlock; /* setup(), enqueue_b() completes */
+ /* Ok, the xfer is in flight already, it's been setup and submitted.*/
+ xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
+ /*
+ * grab the rpipe->seg_lock here to prevent racing with
+ * __wa_xfer_delayed_run.
+ */
+ spin_lock(&rpipe->seg_lock);
+ for (cnt = 0; cnt < xfer->segs; cnt++) {
+ seg = xfer->seg[cnt];
+ pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
+ __func__, wa_xfer_id(xfer), cnt, seg->status);
+ switch (seg->status) {
+ case WA_SEG_NOTREADY:
+ case WA_SEG_READY:
+ printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
+ xfer, cnt, seg->status);
+ WARN_ON(1);
+ break;
+ case WA_SEG_DELAYED:
+ /*
+ * delete from rpipe delayed list. If no segments on
+ * this xfer have been submitted, __wa_xfer_is_done will
+ * trigger a giveback below. Otherwise, the submitted
+ * segments will be completed in the DTI interrupt.
+ */
+ seg->status = WA_SEG_ABORTED;
+ seg->result = -ENOENT;
+ list_del(&seg->list_node);
+ xfer->segs_done++;
+ break;
+ case WA_SEG_DONE:
+ case WA_SEG_ERROR:
+ case WA_SEG_ABORTED:
+ break;
+ /*
+ * The buf_in data for a segment in the
+ * WA_SEG_DTI_PENDING state is actively being read.
+ * Let wa_buf_in_cb handle it since it will be called
+ * and will increment xfer->segs_done. Cleaning up
+ * here could cause wa_buf_in_cb to access the xfer
+ * after it has been completed/freed.
+ */
+ case WA_SEG_DTI_PENDING:
+ break;
+ /*
+ * In the states below, the HWA device already knows
+ * about the transfer. If an abort request was sent,
+ * allow the HWA to process it and wait for the
+ * results. Otherwise, the DTI state and seg completed
+ * counts can get out of sync.
+ */
+ case WA_SEG_SUBMITTED:
+ case WA_SEG_PENDING:
+ /*
+ * Check if the abort was successfully sent. This could
+ * be false if the HWA has been removed but we haven't
+ * gotten the disconnect notification yet.
+ */
+ if (!xfer_abort_pending) {
+ seg->status = WA_SEG_ABORTED;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ xfer->segs_done++;
+ }
+ break;
+ }
+ }
+ spin_unlock(&rpipe->seg_lock);
+ xfer->result = urb->status; /* -ENOENT or -ECONNRESET */
+ done = __wa_xfer_is_done(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ return result;
+
+out_unlock:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_put(xfer);
+ return result;
+
+dequeue_delayed:
+ list_del_init(&xfer->list_node);
+ spin_unlock(&wa->xfer_list_lock);
+ xfer->result = urb->status;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_giveback(xfer);
+ wa_xfer_put(xfer);
+ usb_put_urb(urb); /* we got a ref in enqueue() */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wa_urb_dequeue);
+
+/*
+ * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
+ * codes
+ *
+ * Positive errno values are internal inconsistencies and should be
+ * flagged louder. Negative are to be passed up to the user in the
+ * normal way.
+ *
+ * @status: USB WA status code -- high two bits are stripped.
+ */
+static int wa_xfer_status_to_errno(u8 status)
+{
+ int errno;
+ u8 real_status = status;
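+	/* positive entries flag WA-internal inconsistencies; reported below. */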
+ static int xlat[] = {
+ [WA_XFER_STATUS_SUCCESS] = 0,
+ [WA_XFER_STATUS_HALTED] = -EPIPE,
+ [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
+ [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
+ [WA_XFER_RESERVED] = EINVAL,
+ [WA_XFER_STATUS_NOT_FOUND] = 0,
+ [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
+ [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
+ [WA_XFER_STATUS_ABORTED] = -ENOENT,
+ [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
+ [WA_XFER_INVALID_FORMAT] = EINVAL,
+ [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
+ [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
+ };
+ status &= 0x3f;
+
+ if (status == 0)
+ return 0;
+ if (status >= ARRAY_SIZE(xlat)) {
+		printk_ratelimited(KERN_ERR "%s(): BUG? Unknown WA transfer status 0x%02x\n",
+			__func__, real_status);
+ return -EINVAL;
+ }
+ errno = xlat[status];
+ if (unlikely(errno > 0)) {
+		printk_ratelimited(KERN_ERR "%s(): BUG? Inconsistent WA status: 0x%02x\n",
+			__func__, real_status);
+ errno = -errno;
+ }
+ return errno;
+}
+
+/*
+ * If a last segment flag and/or a transfer result error is encountered,
+ * no other segment transfer results will be returned from the device.
+ * Mark the remaining submitted or pending xfers as completed so that
+ * the xfer will complete cleanly.
+ *
+ * xfer->lock must be held
+ *
+ */
+static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
+ int starting_index, enum wa_seg_status status)
+{
+ int index;
+ struct wa_rpipe *rpipe = xfer->ep->hcpriv;
+
+ for (index = starting_index; index < xfer->segs_submitted; index++) {
+ struct wa_seg *current_seg = xfer->seg[index];
+
+ BUG_ON(current_seg == NULL);
+
+ switch (current_seg->status) {
+ case WA_SEG_SUBMITTED:
+ case WA_SEG_PENDING:
+ case WA_SEG_DTI_PENDING:
+ rpipe_avail_inc(rpipe);
+ /*
+ * do not increment RPIPE avail for the WA_SEG_DELAYED case
+ * since it has not been submitted to the RPIPE.
+ */
+ /* fall through */
+ case WA_SEG_DELAYED:
+ xfer->segs_done++;
+ current_seg->status = status;
+ break;
+ case WA_SEG_ABORTED:
+ break;
+ default:
+ WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
+ __func__, wa_xfer_id(xfer), index,
+ current_seg->status);
+ break;
+ }
+ }
+}
+
+/* Populate the given urb based on the current isoc transfer state. */
+static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
+ struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
+{
+ int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
+ int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
+ int next_frame_contiguous;
+ struct usb_iso_packet_descriptor *iso_frame;
+
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
+
+ /*
+ * If the current frame actual_length is contiguous with the next frame
+ * and actual_length is a multiple of the DTI endpoint max packet size,
+ * combine the current frame with the next frame in a single URB. This
+ * reduces the number of URBs that must be submitted in that case.
+ */
+ seg_index = seg->isoc_frame_index;
+ do {
+ next_frame_contiguous = 0;
+
+ iso_frame = &iso_frame_desc[urb_frame_index];
+ total_len += iso_frame->actual_length;
+ ++urb_frame_index;
+ ++seg_index;
+
+ if (seg_index < seg->isoc_frame_count) {
+ struct usb_iso_packet_descriptor *next_iso_frame;
+
+ next_iso_frame = &iso_frame_desc[urb_frame_index];
+
+ if ((iso_frame->offset + iso_frame->actual_length) ==
+ next_iso_frame->offset)
+ next_frame_contiguous = 1;
+ }
+ } while (next_frame_contiguous
+ && ((iso_frame->actual_length % dti_packet_size) == 0));
+
+ /* this should always be 0 before a resubmit. */
+ buf_in_urb->num_mapped_sgs = 0;
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+ iso_frame_desc[urb_start_frame].offset;
+ buf_in_urb->transfer_buffer_length = total_len;
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ buf_in_urb->context = seg;
+
+ /* return the number of frames included in this URB. */
+ return seg_index - seg->isoc_frame_index;
+}
+
+/* Populate the given urb based on the current transfer state. */
+static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
+ unsigned int seg_idx, unsigned int bytes_transferred)
+{
+ int result = 0;
+ struct wa_seg *seg = xfer->seg[seg_idx];
+
+ BUG_ON(buf_in_urb->status == -EINPROGRESS);
+ /* this should always be 0 before a resubmit. */
+ buf_in_urb->num_mapped_sgs = 0;
+
+ if (xfer->is_dma) {
+ buf_in_urb->transfer_dma = xfer->urb->transfer_dma
+ + (seg_idx * xfer->seg_size);
+ buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ buf_in_urb->transfer_buffer = NULL;
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ } else {
+ /* do buffer or SG processing. */
+ buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;
+
+ if (xfer->urb->transfer_buffer) {
+ buf_in_urb->transfer_buffer =
+ xfer->urb->transfer_buffer
+ + (seg_idx * xfer->seg_size);
+ buf_in_urb->sg = NULL;
+ buf_in_urb->num_sgs = 0;
+ } else {
+ /* allocate an SG list to store seg_size bytes
+ and copy the subset of the xfer->urb->sg
+ that matches the buffer subset we are
+ about to read. */
+ buf_in_urb->sg = wa_xfer_create_subset_sg(
+ xfer->urb->sg,
+ seg_idx * xfer->seg_size,
+ bytes_transferred,
+ &(buf_in_urb->num_sgs));
+
+ if (!(buf_in_urb->sg)) {
+ buf_in_urb->num_sgs = 0;
+ result = -ENOMEM;
+ }
+ buf_in_urb->transfer_buffer = NULL;
+ }
+ }
+ buf_in_urb->transfer_buffer_length = bytes_transferred;
+ buf_in_urb->context = seg;
+
+ return result;
+}
+
+/*
+ * Process a xfer result completion message
+ *
+ * inbound transfers: need to schedule a buf_in_urb read
+ *
+ * FIXME: this function needs to be broken up in parts
+ */
+static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
+ struct wa_xfer_result *xfer_result)
+{
+ int result;
+ struct device *dev = &wa->usb_iface->dev;
+ unsigned long flags;
+ unsigned int seg_idx;
+ struct wa_seg *seg;
+ struct wa_rpipe *rpipe;
+ unsigned done = 0;
+ u8 usb_status;
+ unsigned rpipe_ready = 0;
+ unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
+ struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);
+
+ spin_lock_irqsave(&xfer->lock, flags);
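+	/* the low 7 bits carry the segment index; bit 7 marks the last segment. */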
+ seg_idx = xfer_result->bTransferSegment & 0x7f;
+ if (unlikely(seg_idx >= xfer->segs))
+ goto error_bad_seg;
+ seg = xfer->seg[seg_idx];
+ rpipe = xfer->ep->hcpriv;
+ usb_status = xfer_result->bTransferStatus;
+ dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
+ xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
+ if (seg->status == WA_SEG_ABORTED
+ || seg->status == WA_SEG_ERROR) /* already handled */
+ goto segment_aborted;
+	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
+ seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */
+ if (seg->status != WA_SEG_PENDING) {
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
+ xfer, seg_idx, seg->status);
+ seg->status = WA_SEG_PENDING; /* workaround/"fix" it */
+ }
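+	/* the top bit of bTransferStatus flags a failed segment. */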
+ if (usb_status & 0x80) {
+ seg->result = wa_xfer_status_to_errno(usb_status);
+ dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
+ xfer, xfer->id, seg->index, usb_status);
+ seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
+ WA_SEG_ABORTED : WA_SEG_ERROR;
+ goto error_complete;
+ }
+ /* FIXME: we ignore warnings, tally them for stats */
+ if (usb_status & 0x40) /* Warning?... */
+ usb_status = 0; /* ... pass */
+ /*
+ * If the last segment bit is set, complete the remaining segments.
+ * When the current segment is completed, either in wa_buf_in_cb for
+ * transfers with data or below for no data, the xfer will complete.
+ */
+ if (xfer_result->bTransferSegment & 0x80)
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
+ WA_SEG_DONE);
+ if (usb_pipeisoc(xfer->urb->pipe)
+ && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
+ /* set up WA state to read the isoc packet status next. */
+ wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
+ wa->dti_isoc_xfer_seg = seg_idx;
+ wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
+ } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
+ && (bytes_transferred > 0)) {
+ /* IN data phase: read to buffer */
+ seg->status = WA_SEG_DTI_PENDING;
+ result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
+ bytes_transferred);
+ if (result < 0)
+ goto error_buf_in_populate;
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
+ goto error_submit_buf_in;
+ }
+ } else {
+ /* OUT data phase or no data, complete it -- */
+ seg->result = bytes_transferred;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ return;
+
+error_submit_buf_in:
+ if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
+ xfer, seg_idx, result);
+ seg->result = result;
+ kfree(buf_in_urb->sg);
+ buf_in_urb->sg = NULL;
+error_buf_in_populate:
+ __wa_xfer_abort(xfer);
+ seg->status = WA_SEG_ERROR;
+error_complete:
+ xfer->segs_done++;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
+ done = __wa_xfer_is_done(xfer);
+ /*
+ * queue work item to clear STALL for control endpoints.
+ * Otherwise, let endpoint_reset take care of it.
+ */
+ if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
+ usb_endpoint_xfer_control(&xfer->ep->desc) &&
+ done) {
+
+ dev_info(dev, "Control EP stall. Queue delayed work.\n");
+ spin_lock(&wa->xfer_list_lock);
+ /* move xfer from xfer_list to xfer_errored_list. */
+ list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
+ spin_unlock(&wa->xfer_list_lock);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ queue_work(wusbd, &wa->xfer_error_work);
+ } else {
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+
+ return;
+
+error_bad_seg:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_urb_dequeue(wa, xfer->urb, -ENOENT);
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
+ if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
+		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ return;
+
+segment_aborted:
+ /* nothing to do, as the aborter did the completion */
+ spin_unlock_irqrestore(&xfer->lock, flags);
+}
+
+/*
+ * Process an isochronous packet status message
+ *
+ * inbound transfers: need to schedule a buf_in_urb read
+ */
+static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
+{
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_xfer_packet_status_hwaiso *packet_status;
+ struct wa_xfer_packet_status_len_hwaiso *status_array;
+ struct wa_xfer *xfer;
+ unsigned long flags;
+ struct wa_seg *seg;
+ struct wa_rpipe *rpipe;
+ unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
+ unsigned first_frame_index = 0, rpipe_ready = 0;
+ size_t expected_size;
+
+ /* We have a xfer result buffer; check it */
+ dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
+ urb->actual_length, urb->transfer_buffer);
+ packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
+ if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
+ dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
+ packet_status->bPacketType);
+ goto error_parse_buffer;
+ }
+ xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
+ if (xfer == NULL) {
+ dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
+ wa->dti_isoc_xfer_in_progress);
+ goto error_parse_buffer;
+ }
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
+ goto error_bad_seg;
+ seg = xfer->seg[wa->dti_isoc_xfer_seg];
+ rpipe = xfer->ep->hcpriv;
+ expected_size = struct_size(packet_status, PacketStatus,
+ seg->isoc_frame_count);
+ if (urb->actual_length != expected_size) {
+ dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %zu needed)\n",
+ urb->actual_length, expected_size);
+ goto error_bad_seg;
+ }
+ if (le16_to_cpu(packet_status->wLength) != expected_size) {
+ dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
+ le16_to_cpu(packet_status->wLength));
+ goto error_bad_seg;
+ }
+ /* write isoc packet status and lengths back to the xfer urb. */
+ status_array = packet_status->PacketStatus;
+ xfer->urb->start_frame =
+ wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
+ for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ const int xfer_frame_index =
+ seg->isoc_frame_offset + seg_index;
+
+ iso_frame_desc[xfer_frame_index].status =
+ wa_xfer_status_to_errno(
+ le16_to_cpu(status_array[seg_index].PacketStatus));
+ iso_frame_desc[xfer_frame_index].actual_length =
+ le16_to_cpu(status_array[seg_index].PacketLength);
+ /* track the number of frames successfully transferred. */
+ if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
+ /* save the starting frame index for buf_in_urb. */
+ if (!data_frame_count)
+ first_frame_index = seg_index;
+ ++data_frame_count;
+ }
+ }
+
+ if (xfer->is_inbound && data_frame_count) {
+ int result, total_frames_read = 0, urb_index = 0;
+ struct urb *buf_in_urb;
+
+ /* IN data phase: read to buffer */
+ seg->status = WA_SEG_DTI_PENDING;
+
+ /* start with the first frame with data. */
+ seg->isoc_frame_index = first_frame_index;
+ /* submit up to WA_MAX_BUF_IN_URBS read URBs. */
+ do {
+ int urb_frame_index, urb_frame_count;
+ struct usb_iso_packet_descriptor *iso_frame_desc;
+
+ buf_in_urb = &(wa->buf_in_urbs[urb_index]);
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
+ buf_in_urb, xfer, seg);
+ /* advance frame index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ total_frames_read += urb_frame_count;
+
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
+
+ /* skip 0-byte frames. */
+ urb_frame_index =
+ seg->isoc_frame_offset + seg->isoc_frame_index;
+ iso_frame_desc =
+ &(xfer->urb->iso_frame_desc[urb_frame_index]);
+ while ((seg->isoc_frame_index <
+ seg->isoc_frame_count) &&
+ (iso_frame_desc->actual_length == 0)) {
+ ++(seg->isoc_frame_index);
+ ++iso_frame_desc;
+ }
+ ++urb_index;
+
+ } while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
+ && (seg->isoc_frame_index <
+ seg->isoc_frame_count));
+
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
+ dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+ result);
+ wa_reset_all(wa);
+ } else if (data_frame_count > total_frames_read)
+ /* If we need to read more frames, set DTI busy. */
+ dti_busy = 1;
+ } else {
+ /* OUT transfer or no more IN data, complete it -- */
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (dti_busy)
+ wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
+ else
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ wa_xfer_put(xfer);
+ return dti_busy;
+
+error_bad_seg:
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ wa_xfer_put(xfer);
+error_parse_buffer:
+ return dti_busy;
+}
+
+/*
+ * Callback for the IN data phase
+ *
+ * If successful, transition state; otherwise, take note of the
+ * error, mark this segment done and try completion.
+ *
+ * Note we don't access the transfer until we are sure it hasn't
+ * been cancelled (ECONNRESET, ENOENT), in which case seg->xfer
+ * might already be gone.
+ */
+static void wa_buf_in_cb(struct urb *urb)
+{
+ struct wa_seg *seg = urb->context;
+ struct wa_xfer *xfer = seg->xfer;
+ struct wahc *wa;
+ struct device *dev;
+ struct wa_rpipe *rpipe;
+ unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
+ unsigned long flags;
+ int resubmit_dti = 0, active_buf_in_urbs;
+ u8 done = 0;
+
+ /* free the sg if it was used. */
+ kfree(urb->sg);
+ urb->sg = NULL;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ wa = xfer->wa;
+ dev = &wa->usb_iface->dev;
+ --(wa->active_buf_in_urbs);
+ active_buf_in_urbs = wa->active_buf_in_urbs;
+ rpipe = xfer->ep->hcpriv;
+
+ if (usb_pipeisoc(xfer->urb->pipe)) {
+ struct usb_iso_packet_descriptor *iso_frame_desc =
+ xfer->urb->iso_frame_desc;
+ int seg_index;
+
+ /*
+ * Find the next isoc frame with data and count how many
+ * frames with data remain.
+ */
+ seg_index = seg->isoc_frame_index;
+ while (seg_index < seg->isoc_frame_count) {
+ const int urb_frame_index =
+ seg->isoc_frame_offset + seg_index;
+
+ if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+ /* save the index of the next frame with data */
+ if (!isoc_data_frame_count)
+ seg->isoc_frame_index = seg_index;
+ ++isoc_data_frame_count;
+ }
+ ++seg_index;
+ }
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ switch (urb->status) {
+ case 0:
+ spin_lock_irqsave(&xfer->lock, flags);
+
+ seg->result += urb->actual_length;
+ if (isoc_data_frame_count > 0) {
+ int result, urb_frame_count;
+
+ /* submit a read URB for the next frame with data. */
+ urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
+ xfer, seg);
+ /* advance index to start of next read URB. */
+ seg->isoc_frame_index += urb_frame_count;
+ ++(wa->active_buf_in_urbs);
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result < 0) {
+ --(wa->active_buf_in_urbs);
+ dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+ result);
+ wa_reset_all(wa);
+ }
+ /*
+ * If we are in this callback and
+ * isoc_data_frame_count > 0, it means that the dti_urb
+ * submission was delayed in wa_dti_cb. Once
+ * we submit the last buf_in_urb, we can submit the
+ * delayed dti_urb.
+ */
+ resubmit_dti = (isoc_data_frame_count ==
+ urb_frame_count);
+ } else if (active_buf_in_urbs == 0) {
+ dev_dbg(dev,
+ "xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ seg->result);
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_DONE);
+ }
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ break;
+ case -ECONNRESET: /* URB unlinked; no need to do anything */
+ case -ENOENT: /* as it was done by whoever unlinked us */
+ break;
+ default: /* Other errors ... */
+ /*
+ * Error on data buf read. Only resubmit DTI if it hasn't
+ * already been done by previously hitting this error or by a
+ * successful completion of the previous buf_in_urb.
+ */
+ resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
+ spin_lock_irqsave(&xfer->lock, flags);
+ if (printk_ratelimit())
+ dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
+ xfer, wa_xfer_id(xfer), seg->index,
+ urb->status);
+ if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)){
+ dev_err(dev, "DTO: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ }
+ seg->result = urb->status;
+ rpipe_ready = rpipe_avail_inc(rpipe);
+ if (active_buf_in_urbs == 0)
+ done = __wa_xfer_mark_seg_as_done(xfer, seg,
+ WA_SEG_ERROR);
+ else
+ __wa_xfer_abort(xfer);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+
+ if (resubmit_dti) {
+ int result;
+
+ wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
+
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
+ }
+}
+
+/*
+ * Handle an incoming transfer result buffer
+ *
+ * Given a transfer result buffer, it completes the transfer (possibly
+ * scheduling a buffer-in read) and then resubmits the DTI URB for a
+ * new transfer result read.
+ *
+ *
+ * The xfer_result DTI URB state machine
+ *
+ * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
+ *
+ * We start in OFF mode, the first xfer_result notification [through
+ * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
+ * read.
+ *
+ * We receive a buffer -- if it is not a xfer_result, we complain and
+ * repost the DTI-URB. If it is a xfer_result then do the xfer seg
+ * request accounting. If it is an IN segment, we move to RBI and post
+ * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
+ * repost the DTI-URB and move back to the RXR state. If there was no
+ * IN segment, it will repost the DTI-URB.
+ *
+ * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
+ * errors) in the URBs.
+ */
+static void wa_dti_cb(struct urb *urb)
+{
+ int result, dti_busy = 0;
+ struct wahc *wa = urb->context;
+ struct device *dev = &wa->usb_iface->dev;
+ u32 xfer_id;
+ u8 usb_status;
+
+ BUG_ON(wa->dti_urb != urb);
+ switch (wa->dti_urb->status) {
+ case 0:
+ if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
+ struct wa_xfer_result *xfer_result;
+ struct wa_xfer *xfer;
+
+ /* We have a xfer result buffer; check it */
+ dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
+ urb->actual_length, urb->transfer_buffer);
+ if (urb->actual_length != sizeof(*xfer_result)) {
+ dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
+ urb->actual_length,
+ sizeof(*xfer_result));
+ break;
+ }
+ xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
+ if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
+ dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
+ xfer_result->hdr.bLength);
+ break;
+ }
+ if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
+ dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
+ xfer_result->hdr.bNotifyType);
+ break;
+ }
+ xfer_id = le32_to_cpu(xfer_result->dwTransferID);
+ usb_status = xfer_result->bTransferStatus & 0x3f;
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
+ /* taken care of already */
+ dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
+ __func__, xfer_id,
+ xfer_result->bTransferSegment & 0x7f);
+ break;
+ }
+ xfer = wa_xfer_get_by_id(wa, xfer_id);
+ if (xfer == NULL) {
+ /* FIXME: transaction not found. */
+ dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
+ xfer_id, usb_status);
+ break;
+ }
+ wa_xfer_result_chew(wa, xfer, xfer_result);
+ wa_xfer_put(xfer);
+ } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
+ dti_busy = wa_process_iso_packet_status(wa, urb);
+ } else {
+ dev_err(dev, "DTI Error: unexpected EP state = %d\n",
+ wa->dti_state);
+ }
+ break;
+ case -ENOENT: /* (we killed the URB)...so, no broadcast */
+ case -ESHUTDOWN: /* going away! */
+ dev_dbg(dev, "DTI: going down! %d\n", urb->status);
+ goto out;
+ default:
+ /* Unknown error */
+ if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
+ EDC_ERROR_TIMEFRAME)) {
+ dev_err(dev, "DTI: URB max acceptable errors "
+ "exceeded, resetting device\n");
+ wa_reset_all(wa);
+ goto out;
+ }
+ if (printk_ratelimit())
+ dev_err(dev, "DTI: URB error %d\n", urb->status);
+ break;
+ }
+
+ /* Resubmit the DTI URB if we are not busy processing isoc in frames. */
+ if (!dti_busy) {
+ result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+ result);
+ wa_reset_all(wa);
+ }
+ }
+out:
+ return;
+}
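+
+/*
+ * Illustrative sketch only, not part of the original driver: the DTI
+ * state machine described above, condensed into a tracing helper.
+ * wa_dti_state_name() is a hypothetical function; the WA_DTI_*
+ * constants are the ones wa_dti_cb() and wa_process_iso_packet_status()
+ * switch on, and the RXR/RBI names follow the comment block above.
+ */
+#if 0 /* documentation sketch */
+static const char *wa_dti_state_name(int dti_state)
+{
+ switch (dti_state) {
+ case WA_DTI_TRANSFER_RESULT_PENDING:
+ return "RXR (waiting for a transfer result)";
+ case WA_DTI_ISOC_PACKET_STATUS_PENDING:
+ return "RXR (waiting for isoc packet status)";
+ case WA_DTI_BUF_IN_DATA_PENDING:
+ return "RBI (buf-in URBs in flight)";
+ default:
+ return "OFF";
+ }
+}
+#endif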
+
+/*
+ * Initialize the DTI URB for reading transfer result notifications and also
+ * the buffer-in URB, for reading buffers. Then we just submit the DTI URB.
+ */
+int wa_dti_start(struct wahc *wa)
+{
+ const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+ struct device *dev = &wa->usb_iface->dev;
+ int result = -ENOMEM, index;
+
+ if (wa->dti_urb != NULL) /* DTI URB already started */
+ goto out;
+
+ wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (wa->dti_urb == NULL)
+ goto error_dti_urb_alloc;
+ usb_fill_bulk_urb(
+ wa->dti_urb, wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
+ wa->dti_buf, wa->dti_buf_size,
+ wa_dti_cb, wa);
+
+ /* init the buf in URBs */
+ for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
+ usb_fill_bulk_urb(
+ &(wa->buf_in_urbs[index]), wa->usb_dev,
+ usb_rcvbulkpipe(wa->usb_dev,
+ 0x80 | dti_epd->bEndpointAddress),
+ NULL, 0, wa_buf_in_cb, wa);
+ }
+ result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
+ if (result < 0) {
+ dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
+ result);
+ goto error_dti_urb_submit;
+ }
+out:
+ return 0;
+
+error_dti_urb_submit:
+ usb_put_urb(wa->dti_urb);
+ wa->dti_urb = NULL;
+error_dti_urb_alloc:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wa_dti_start);
+
+/*
+ * Transfer complete notification
+ *
+ * Called from the notification endpoint handling code. We get a
+ * notification on EP2 saying that some endpoint has some transfer
+ * result data available. We are about to read it.
+ *
+ * To speed things up, we always keep a URB reading the DTI endpoint;
+ * we don't really set it up and start it until the first xfer-complete
+ * notification arrives, which is what we do here.
+ *
+ * Follow up in wa_dti_cb(), as that's where the whole state
+ * machine starts.
+ *
+ * @wa shall be referenced
+ */
+void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
+{
+ struct device *dev = &wa->usb_iface->dev;
+ struct wa_notif_xfer *notif_xfer;
+ const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
+
+ notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
+ BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
+
+ if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
+ /* FIXME: hardcoded limitation, adapt */
+ dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
+ notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
+ goto error;
+ }
+
+ /* attempt to start the DTI ep processing. */
+ if (wa_dti_start(wa) < 0)
+ goto error;
+
+ return;
+
+error:
+ wa_reset_all(wa);
+}
diff --git a/drivers/staging/wusbcore/wusbhc.c b/drivers/staging/wusbcore/wusbhc.c
new file mode 100644
index 000000000000..d0b404d258e8
--- /dev/null
+++ b/drivers/staging/wusbcore/wusbhc.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB Host Controller
+ * sysfs glue, wusbcore module support and life cycle management
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * Creation/destruction of wusbhc is split in two parts: the part that
+ * doesn't require the HCD to be added (wusbhc_{create,destroy}) and
+ * the part that does (phase B, wusbhc_b_{create,destroy}).
+ *
+ * This is so because usb_add_hcd() will start the HC, and thus, all
+ * the HC specific stuff has to be already initialized (like sysfs
+ * thingies).
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include "wusbhc.h"
+
+/**
+ * Extract the wusbhc that corresponds to a USB Host Controller class device
+ *
+ * WARNING! Apply only if @dev is that of a
+ * wusbhc.usb_hcd.self->class_dev; otherwise, you lose.
+ */
+static struct wusbhc *usbhc_dev_to_wusbhc(struct device *dev)
+{
+ struct usb_bus *usb_bus = dev_get_drvdata(dev);
+ struct usb_hcd *usb_hcd = bus_to_hcd(usb_bus);
+ return usb_hcd_to_wusbhc(usb_hcd);
+}
+
+/*
+ * Show & store the current WUSB trust timeout
+ *
+ * We don't do locking--it is an 'atomic' value.
+ *
+ * The units we store and show are always MILLISECONDS; the value is
+ * only converted to jiffies when the keep-alive timer is armed.
+ */
+static ssize_t wusb_trust_timeout_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", wusbhc->trust_timeout);
+}
+
+static ssize_t wusb_trust_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ ssize_t result = -ENOSYS;
+ unsigned trust_timeout;
+
+ result = sscanf(buf, "%u", &trust_timeout);
+ if (result != 1) {
+ result = -EINVAL;
+ goto out;
+ }
+ wusbhc->trust_timeout = min_t(unsigned, trust_timeout, 500);
+ cancel_delayed_work(&wusbhc->keep_alive_timer);
+ flush_workqueue(wusbd);
+ queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
+ msecs_to_jiffies(wusbhc->trust_timeout / 2));
+out:
+ return result < 0 ? result : size;
+}
+static DEVICE_ATTR_RW(wusb_trust_timeout);
+
+/*
+ * Show the current WUSB CHID.
+ */
+static ssize_t wusb_chid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ const struct wusb_ckhdid *chid;
+
+ if (wusbhc->wuie_host_info != NULL)
+ chid = &wusbhc->wuie_host_info->CHID;
+ else
+ chid = &wusb_ckhdid_zero;
+
+ return sprintf(buf, "%16ph\n", chid->data);
+}
+
+/*
+ * Store a new CHID.
+ *
+ * - Write an all zeros CHID and it will stop the controller
+ * - Write a non-zero CHID and it will start it.
+ *
+ * See wusbhc_chid_set() for more info.
+ */
+static ssize_t wusb_chid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ struct wusb_ckhdid chid;
+ ssize_t result;
+
+ result = sscanf(buf,
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx "
+ "%02hhx %02hhx %02hhx %02hhx\n",
+ &chid.data[0] , &chid.data[1] ,
+ &chid.data[2] , &chid.data[3] ,
+ &chid.data[4] , &chid.data[5] ,
+ &chid.data[6] , &chid.data[7] ,
+ &chid.data[8] , &chid.data[9] ,
+ &chid.data[10], &chid.data[11],
+ &chid.data[12], &chid.data[13],
+ &chid.data[14], &chid.data[15]);
+ if (result != 16) {
+ dev_err(dev, "Unrecognized CHID (need 16 8-bit hex digits): "
+ "%d\n", (int)result);
+ return -EINVAL;
+ }
+ result = wusbhc_chid_set(wusbhc, &chid);
+ return result < 0 ? result : size;
+}
+static DEVICE_ATTR_RW(wusb_chid);
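+
+/*
+ * Illustrative sketch only, not part of the original driver: a
+ * hypothetical userspace helper showing the string format that
+ * wusb_chid_store() above parses -- 16 space-separated 8-bit hex
+ * values. Writing an all-zeros CHID stops the controller; a non-zero
+ * one starts it. The sysfs path is whatever the wusb_chid attribute
+ * lives under on a given system.
+ */
+#if 0 /* userspace usage sketch */
+#include <stdio.h>
+
+static int write_chid(const char *wusb_chid_path, const unsigned char chid[16])
+{
+ FILE *f = fopen(wusb_chid_path, "w");
+ int i;
+
+ if (f == NULL)
+ return -1;
+ for (i = 0; i < 16; i++)
+ fprintf(f, "%02x%c", chid[i], i == 15 ? '\n' : ' ');
+ return fclose(f);
+}
+#endif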
+
+
+static ssize_t wusb_phy_rate_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+ return sprintf(buf, "%d\n", wusbhc->phy_rate);
+}
+
+static ssize_t wusb_phy_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ uint8_t phy_rate;
+ ssize_t result;
+
+ result = sscanf(buf, "%hhu", &phy_rate);
+ if (result != 1)
+ return -EINVAL;
+ if (phy_rate >= UWB_PHY_RATE_INVALID)
+ return -EINVAL;
+
+ wusbhc->phy_rate = phy_rate;
+ return size;
+}
+static DEVICE_ATTR_RW(wusb_phy_rate);
+
+static ssize_t wusb_dnts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+ return sprintf(buf, "num slots: %d\ninterval: %dms\n",
+ wusbhc->dnts_num_slots, wusbhc->dnts_interval);
+}
+
+static ssize_t wusb_dnts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ uint8_t num_slots, interval;
+ ssize_t result;
+
+ result = sscanf(buf, "%hhu %hhu", &num_slots, &interval);
+
+ if (result != 2)
+ return -EINVAL;
+
+ wusbhc->dnts_num_slots = num_slots;
+ wusbhc->dnts_interval = interval;
+
+ return size;
+}
+static DEVICE_ATTR_RW(wusb_dnts);
+
+static ssize_t wusb_retry_count_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+
+ return sprintf(buf, "%d\n", wusbhc->retry_count);
+}
+
+static ssize_t wusb_retry_count_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
+ uint8_t retry_count;
+ ssize_t result;
+
+ result = sscanf(buf, "%hhu", &retry_count);
+
+ if (result != 1)
+ return -EINVAL;
+
+ wusbhc->retry_count = min_t(uint8_t, retry_count,
+ WUSB_RETRY_COUNT_MAX);
+
+ return size;
+}
+static DEVICE_ATTR_RW(wusb_retry_count);
+
+/* Group all the WUSBHC attributes */
+static struct attribute *wusbhc_attrs[] = {
+ &dev_attr_wusb_trust_timeout.attr,
+ &dev_attr_wusb_chid.attr,
+ &dev_attr_wusb_phy_rate.attr,
+ &dev_attr_wusb_dnts.attr,
+ &dev_attr_wusb_retry_count.attr,
+ NULL,
+};
+
+static const struct attribute_group wusbhc_attr_group = {
+ .name = NULL, /* we want them in the same directory */
+ .attrs = wusbhc_attrs,
+};
+
+/*
+ * Create a wusbhc instance
+ *
+ * NOTEs:
+ *
+ * - assumes *wusbhc has been zeroed and wusbhc->usb_hcd has been
+ * initialized but not added.
+ *
+ * - fill out ports_max, mmcies_max and mmcie_{add,rm} before calling.
+ *
+ * - fill out wusbhc->uwb_rc and refcount it before calling
+ * - fill out the wusbhc->sec_modes array
+ */
+int wusbhc_create(struct wusbhc *wusbhc)
+{
+ int result = 0;
+
+ /* set defaults. These can be overwritten using sysfs attributes. */
+ wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
+ wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1;
+ wusbhc->dnts_num_slots = 4;
+ wusbhc->dnts_interval = 2;
+ wusbhc->retry_count = WUSB_RETRY_COUNT_INFINITE;
+
+ mutex_init(&wusbhc->mutex);
+ result = wusbhc_mmcie_create(wusbhc);
+ if (result < 0)
+ goto error_mmcie_create;
+ result = wusbhc_devconnect_create(wusbhc);
+ if (result < 0)
+ goto error_devconnect_create;
+ result = wusbhc_rh_create(wusbhc);
+ if (result < 0)
+ goto error_rh_create;
+ result = wusbhc_sec_create(wusbhc);
+ if (result < 0)
+ goto error_sec_create;
+ return 0;
+
+error_sec_create:
+ wusbhc_rh_destroy(wusbhc);
+error_rh_create:
+ wusbhc_devconnect_destroy(wusbhc);
+error_devconnect_create:
+ wusbhc_mmcie_destroy(wusbhc);
+error_mmcie_create:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_create);
+
+static inline struct kobject *wusbhc_kobj(struct wusbhc *wusbhc)
+{
+ return &wusbhc->usb_hcd.self.controller->kobj;
+}
+
+/*
+ * Phase B of a wusbhc instance creation
+ *
+ * Creates fields that depend on wusbhc->usb_hcd having been
+ * added. This is where we create the sysfs files in
+ * /sys/class/usb_host/usb_hostX/.
+ *
+ * NOTE: Assumes wusbhc->usb_hcd has been already added by the upper
+ * layer (hwahc or whci)
+ */
+int wusbhc_b_create(struct wusbhc *wusbhc)
+{
+ int result = 0;
+ struct device *dev = wusbhc->usb_hcd.self.controller;
+
+ result = sysfs_create_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
+ if (result < 0) {
+ dev_err(dev, "Cannot register WUSBHC attributes: %d\n",
+ result);
+ goto error_create_attr_group;
+ }
+
+ return 0;
+error_create_attr_group:
+ return result;
+}
+EXPORT_SYMBOL_GPL(wusbhc_b_create);
+
+void wusbhc_b_destroy(struct wusbhc *wusbhc)
+{
+ wusbhc_pal_unregister(wusbhc);
+ sysfs_remove_group(wusbhc_kobj(wusbhc), &wusbhc_attr_group);
+}
+EXPORT_SYMBOL_GPL(wusbhc_b_destroy);
+
+void wusbhc_destroy(struct wusbhc *wusbhc)
+{
+ wusbhc_sec_destroy(wusbhc);
+ wusbhc_rh_destroy(wusbhc);
+ wusbhc_devconnect_destroy(wusbhc);
+ wusbhc_mmcie_destroy(wusbhc);
+}
+EXPORT_SYMBOL_GPL(wusbhc_destroy);
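+
+/*
+ * Illustrative sketch only, not part of the original driver: how an
+ * upper layer (an HWA or WHCI host controller driver) is expected to
+ * order the two creation phases and tear them down in reverse. The
+ * my_hc type, the irq argument and the label names are hypothetical;
+ * usb_add_hcd()/usb_remove_hcd() are the regular USB HCD calls.
+ */
+#if 0 /* usage sketch */
+static int my_hc_bringup(struct my_hc *hc, int irq)
+{
+ int result;
+
+ /* hc->wusbhc is zeroed; ports_max, mmcies_max, mmcie_{add,rm},
+ * uwb_rc, etc. are assumed to be already filled in. */
+ result = wusbhc_create(&hc->wusbhc);
+ if (result < 0)
+ goto error_wusbhc_create;
+ result = usb_add_hcd(&hc->wusbhc.usb_hcd, irq, IRQF_SHARED);
+ if (result < 0)
+ goto error_add_hcd;
+ result = wusbhc_b_create(&hc->wusbhc); /* sysfs bits */
+ if (result < 0)
+ goto error_wusbhc_b_create;
+ return 0;
+
+error_wusbhc_b_create:
+ usb_remove_hcd(&hc->wusbhc.usb_hcd);
+error_add_hcd:
+ wusbhc_destroy(&hc->wusbhc);
+error_wusbhc_create:
+ return result;
+}
+#endif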
+
+struct workqueue_struct *wusbd;
+EXPORT_SYMBOL_GPL(wusbd);
+
+/*
+ * WUSB Cluster ID allocation map
+ *
+ * Each WUSB bus in a channel is identified with a Cluster Id in the
+ * unauth address space (WUSB1.0[4.3]). We take the range 0xe0 to 0xff
+ * (that's space for 31 WUSB controllers, as 0xff can't be taken). We
+ * start taking from 0xff, 0xfe, 0xfd... (hence the 0xff - id mapping).
+ *
+ * For each one we take, we pin it in the bitmap.
+ */
+#define CLUSTER_IDS 32
+static DECLARE_BITMAP(wusb_cluster_id_table, CLUSTER_IDS);
+static DEFINE_SPINLOCK(wusb_cluster_ids_lock);
+
+/*
+ * Get a WUSB Cluster ID
+ *
+ * Need to release with wusb_cluster_id_put() when done w/ it.
+ */
+/* FIXME: coordinate with the choose_address() from the USB stack */
+/* we want to leave the top of the 128 range for cluster addresses and
+ * the bottom for device addresses (as we map them one on one with
+ * ports). */
+u8 wusb_cluster_id_get(void)
+{
+ u8 id;
+ spin_lock(&wusb_cluster_ids_lock);
+ id = find_first_zero_bit(wusb_cluster_id_table, CLUSTER_IDS);
+ if (id >= CLUSTER_IDS) {
+ id = 0;
+ goto out;
+ }
+ set_bit(id, wusb_cluster_id_table);
+ id = (u8) 0xff - id;
+out:
+ spin_unlock(&wusb_cluster_ids_lock);
+ return id;
+
+}
+EXPORT_SYMBOL_GPL(wusb_cluster_id_get);
+
+/*
+ * Release a WUSB Cluster ID
+ *
+ * Obtained it with wusb_cluster_id_get()
+ */
+void wusb_cluster_id_put(u8 id)
+{
+ id = 0xff - id;
+ BUG_ON(id >= CLUSTER_IDS);
+ spin_lock(&wusb_cluster_ids_lock);
+ WARN_ON(!test_bit(id, wusb_cluster_id_table));
+ clear_bit(id, wusb_cluster_id_table);
+ spin_unlock(&wusb_cluster_ids_lock);
+}
+EXPORT_SYMBOL_GPL(wusb_cluster_id_put);
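+
+/*
+ * Illustrative sketch only, not part of the original driver: pairing
+ * wusb_cluster_id_get() with wusb_cluster_id_put(). Bit 0 of the
+ * bitmap maps to cluster ID 0xff, bit 1 to 0xfe and so on, which is
+ * why 0 doubles as the "no ID left" return value.
+ */
+#if 0 /* usage sketch */
+static int example_cluster_id(void)
+{
+ u8 cluster_id = wusb_cluster_id_get(); /* 0xff, 0xfe, ... */
+
+ if (cluster_id == 0)
+ return -ENOSPC; /* all usable IDs are taken */
+ /* ... use the ID for this host's WUSB cluster ... */
+ wusb_cluster_id_put(cluster_id);
+ return 0;
+}
+#endif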
+
+/**
+ * wusbhc_giveback_urb - return an URB to the USB core
+ * @wusbhc: the host controller the URB is from.
+ * @urb: the URB.
+ * @status: the URB's status.
+ *
+ * Return an URB to the USB core doing some additional WUSB specific
+ * processing.
+ *
+ * - After a successful transfer, update the trust timeout timestamp
+ * for the WUSB device.
+ *
+ * - [WUSB] sections 4.13 and 7.5.1 specify the stop retransmission
+ * condition for the WCONNECTACK_IE is that the host has observed
+ * the associated device responding to a control transfer.
+ */
+void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb, int status)
+{
+ struct wusb_dev *wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc,
+ urb->dev);
+
+ if (status == 0 && wusb_dev) {
+ wusb_dev->entry_ts = jiffies;
+
+ /* wusbhc_devconnect_acked() can't be called from
+ atomic context so defer it to a work queue. */
+ if (!list_empty(&wusb_dev->cack_node))
+ queue_work(wusbd, &wusb_dev->devconnect_acked_work);
+ else
+ wusb_dev_put(wusb_dev);
+ }
+
+ usb_hcd_giveback_urb(&wusbhc->usb_hcd, urb, status);
+}
+EXPORT_SYMBOL_GPL(wusbhc_giveback_urb);
+
+/**
+ * wusbhc_reset_all - reset the HC hardware
+ * @wusbhc: the host controller to reset.
+ *
+ * Request a full hardware reset of the chip. This will also reset
+ * the radio controller and any other PALs.
+ */
+void wusbhc_reset_all(struct wusbhc *wusbhc)
+{
+ if (wusbhc->uwb_rc)
+ uwb_rc_reset_all(wusbhc->uwb_rc);
+}
+EXPORT_SYMBOL_GPL(wusbhc_reset_all);
+
+static struct notifier_block wusb_usb_notifier = {
+ .notifier_call = wusb_usb_ncb,
+ .priority = INT_MAX /* Need to be called first of all */
+};
+
+static int __init wusbcore_init(void)
+{
+ int result;
+ result = wusb_crypto_init();
+ if (result < 0)
+ goto error_crypto_init;
+ /* WQ is singlethread because we need to serialize notifications */
+ wusbd = create_singlethread_workqueue("wusbd");
+ if (wusbd == NULL) {
+ result = -ENOMEM;
+ printk(KERN_ERR "WUSB-core: Cannot create wusbd workqueue\n");
+ goto error_wusbd_create;
+ }
+ usb_register_notify(&wusb_usb_notifier);
+ bitmap_zero(wusb_cluster_id_table, CLUSTER_IDS);
+ set_bit(0, wusb_cluster_id_table); /* reserve Cluster ID 0xff */
+ return 0;
+
+error_wusbd_create:
+ wusb_crypto_exit();
+error_crypto_init:
+ return result;
+
+}
+module_init(wusbcore_init);
+
+static void __exit wusbcore_exit(void)
+{
+ clear_bit(0, wusb_cluster_id_table);
+ if (!bitmap_empty(wusb_cluster_id_table, CLUSTER_IDS)) {
+ printk(KERN_ERR "BUG: WUSB Cluster IDs not released on exit: %*pb\n",
+ CLUSTER_IDS, wusb_cluster_id_table);
+ WARN_ON(1);
+ }
+ usb_unregister_notify(&wusb_usb_notifier);
+ destroy_workqueue(wusbd);
+ wusb_crypto_exit();
+}
+module_exit(wusbcore_exit);
+
+MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
+MODULE_DESCRIPTION("Wireless USB core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wusbcore/wusbhc.h b/drivers/staging/wusbcore/wusbhc.h
new file mode 100644
index 000000000000..716244a2ec44
--- /dev/null
+++ b/drivers/staging/wusbcore/wusbhc.h
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Wireless USB Host Controller
+ * Common infrastructure for WHCI and HWA WUSB-HC drivers
+ *
+ *
+ * Copyright (C) 2005-2006 Intel Corporation
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
+ *
+ * This driver implements parts common to all Wireless USB Host
+ * Controllers (struct wusbhc, embedding a struct usb_hcd) and is used
+ * by:
+ *
+ * - hwahc: HWA, USB-dongle that implements a Wireless USB host
+ * controller, (Wireless USB 1.0 Host-Wire-Adapter specification).
+ *
+ * - whci: WHCI, a PCI card with a wireless host controller
+ * (Wireless Host Controller Interface 1.0 specification).
+ *
+ * Check out the wusb-design-overview.rst file in the Documentation
+ * directory for other details on the implementation.
+ *
+ * Main blocks:
+ *
+ * rh Root Hub emulation (part of the HCD glue)
+ *
+ * devconnect Handle all the issues related to device connection,
+ * authentication, disconnection, timeout, resetting,
+ * keepalives, etc.
+ *
+ * mmc MMC IE broadcasting handling
+ *
+ * A host controller driver just initializes its stuff and as part of
+ * that, creates a 'struct wusbhc' instance that handles all the
+ * common WUSB mechanisms. Links in the function ops that are specific
+ * to it and then registers the host controller. Ready to run.
+ */
+
+#ifndef __WUSBHC_H__
+#define __WUSBHC_H__
+
+#include <linux/usb.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
+#include <linux/usb/hcd.h>
+#include "../uwb/uwb.h"
+#include "include/wusb.h"
+
+/*
+ * Time from a WUSB channel stop request to the last transmitted MMC.
+ *
+ * This needs to be > 4.096 ms in case no MMCs can be transmitted in
+ * zone 0.
+ */
+#define WUSB_CHANNEL_STOP_DELAY_MS 8
+#define WUSB_RETRY_COUNT_MAX 15
+#define WUSB_RETRY_COUNT_INFINITE 0
+
+/**
+ * Wireless USB device
+ *
+ * Describe a WUSB device connected to the cluster. This struct
+ * belongs to the 'struct wusb_port' it is attached to and it is
+ * responsible for putting and clearing the pointer to it.
+ *
+ * Note this "complements" the 'struct usb_device' that the usb_hcd
+ * keeps for each connected USB device. However, it extends some
+ * information that is not available (there is no hcpriv ptr in it!)
+ * *and* most importantly, its life cycle is different. It is created
+ * as soon as we get a DN_Connect (connect request notification) from
+ * the device through the WUSB host controller; the USB stack doesn't
+ * create the device until we authenticate it. FIXME: this will
+ * change.
+ *
+ * @bos: This is allocated when the BOS descriptors are read from
+ * the device and freed upon the wusb_dev struct dying.
+ * @wusb_cap_descr: points into @bos, and has been verified to be size
+ * safe.
+ */
+struct wusb_dev {
+ struct kref refcnt;
+ struct wusbhc *wusbhc;
+ struct list_head cack_node; /* Connect-Ack list */
+ struct list_head rekey_node; /* GTK rekey list */
+ u8 port_idx;
+ u8 addr;
+ u8 beacon_type:4;
+ struct usb_encryption_descriptor ccm1_etd;
+ struct wusb_ckhdid cdid;
+ unsigned long entry_ts;
+ struct usb_bos_descriptor *bos;
+ struct usb_wireless_cap_descriptor *wusb_cap_descr;
+ struct uwb_mas_bm availability;
+ struct work_struct devconnect_acked_work;
+ struct usb_device *usb_dev;
+};
+
+#define WUSB_DEV_ADDR_UNAUTH 0x80
+
+static inline void wusb_dev_init(struct wusb_dev *wusb_dev)
+{
+ kref_init(&wusb_dev->refcnt);
+ /* no need to init the cack_node */
+}
+
+extern void wusb_dev_destroy(struct kref *_wusb_dev);
+
+static inline struct wusb_dev *wusb_dev_get(struct wusb_dev *wusb_dev)
+{
+ kref_get(&wusb_dev->refcnt);
+ return wusb_dev;
+}
+
+static inline void wusb_dev_put(struct wusb_dev *wusb_dev)
+{
+ kref_put(&wusb_dev->refcnt, wusb_dev_destroy);
+}
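+
+/*
+ * Illustrative sketch only, not part of the original driver: the usual
+ * get/put pairing around a wusb_dev reference; the last put frees it
+ * through wusb_dev_destroy().
+ */
+#if 0 /* usage sketch */
+static void example_wusb_dev_use(struct wusb_dev *wusb_dev)
+{
+ wusb_dev_get(wusb_dev); /* take a reference */
+ /* ... wusb_dev is guaranteed to stay around here ... */
+ wusb_dev_put(wusb_dev); /* drop it */
+}
+#endif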
+
+/**
+ * Wireless USB Host Controller root hub "fake" ports
+ * (state and device information)
+ *
+ * Wireless USB is wireless, so there are no ports; but we
+ * fake'em. Each RC can connect a maximum number of devices at the
+ * same time (given in the Wireless Adapter descriptor's bNumPorts or
+ * WHCI's capabilities), referred to as wusbhc->ports_max.
+ *
+ * See rh.c for more information.
+ *
+ * The @status and @change use the same bits as in USB2.0[11.24.2.7],
+ * so we don't have to do much when getting the port's status.
+ *
+ * WUSB1.0[7.1], USB2.0[11.24.2.7.1,fig 11-10],
+ * include/linux/usb_ch9.h (#define USB_PORT_STAT_*)
+ */
+struct wusb_port {
+ u16 status;
+ u16 change;
+ struct wusb_dev *wusb_dev; /* connected device's info */
+ u32 ptk_tkid;
+};
+
+/**
+ * WUSB Host Controller specifics
+ *
+ * All fields that are common to all Wireless USB controller types
+ * (HWA and WHCI) are grouped here. Host Controller
+ * functions/operations that only deal with general Wireless USB HC
+ * issues use this data type to refer to the host.
+ *
+ * @usb_hcd Instantiation of a USB host controller
+ * (initialized by the upper layer [HWA-HC or WHCI]).
+ *
+ * @dev Device that implements this; initialized by the
+ * upper layer (HWA-HC, WHCI...); this device should
+ * have a refcount.
+ *
+ * @trust_timeout After this time without hearing any device
+ * activity, we consider the device gone and we have to
+ * re-authenticate.
+ *
+ * Can be accessed w/o locking--however, read to a
+ * local variable then use.
+ *
+ * @chid WUSB Cluster Host ID: this is supposed to be a
+ * unique value that doesn't change across reboots (so
+ * that your devices do not require re-association).
+ *
+ * Read/Write protected by @mutex
+ *
+ * @dev_info This array has ports_max elements. It is used to
+ * give the HC information about the WUSB devices (see
+ * 'struct wusb_dev_info').
+ *
+ * For HWA we need to allocate it in heap; for WHCI it
+ * needs to be permanently mapped, so we keep it for
+ * both and make it easy. Call wusbhc->dev_info_set()
+ * to update an entry.
+ *
+ * @ports_max Number of simultaneous device connections (fake
+ * ports) this HC will take. Read-only.
+ *
+ * @port Array of port status for each fake root port. Guaranteed to
+ * always be the same length during device existence
+ * [this allows for some unlocked but referenced reading].
+ *
+ * @mmcies_max Max number of Information Elements this HC can send
+ * in its MMC. Read-only.
+ *
+ * @start Start the WUSB channel.
+ *
+ * @stop Stop the WUSB channel after the specified number of
+ * milliseconds. Channel Stop IEs should be transmitted
+ * as required by [WUSB] 4.16.2.1.
+ *
+ * @mmcie_add HC specific operation (WHCI or HWA) for adding an
+ * MMCIE.
+ *
+ * @mmcie_rm HC specific operation (WHCI or HWA) for removing an
+ * MMCIE.
+ *
+ * @set_ptk: Set the PTK and enable encryption for a device. Or, if
+ * the supplied key is NULL, disable encryption for that
+ * device.
+ *
+ * @set_gtk: Set the GTK to be used for all future broadcast packets
+ * (i.e., MMCs). With some hardware, setting the GTK may start
+ * MMC transmission.
+ *
+ * NOTE:
+ *
+ * - If wusb_dev->usb_dev is not NULL, then usb_dev is valid
+ * (wusb_dev has a refcount on it). Likewise, if usb_dev->wusb_dev
+ * is not NULL, usb_dev->wusb_dev is valid (usb_dev keeps a
+ * refcount on it).
+ *
+ * Most of the times when you need to use it, it will be non-NULL,
+ * so there is no real need to check for it (wusb_dev will
+ * disappear before usb_dev).
+ *
+ * - The following fields need to be filled out before calling
+ * wusbhc_create(): ports_max, mmcies_max, mmcie_{add,rm}.
+ *
+ * - there is no wusbhc_init() method, we do everything in
+ * wusbhc_create().
+ *
+ * - Creation is done in two phases, wusbhc_create() and
+ * wusbhc_b_create(); the "b" parts are those that need to be called
+ * after usb_add_hcd() has been called on &wusbhc->usb_hcd.
+ */
+struct wusbhc {
+ struct usb_hcd usb_hcd; /* HAS TO BE 1st */
+ struct device *dev;
+ struct uwb_rc *uwb_rc;
+ struct uwb_pal pal;
+
+ unsigned trust_timeout; /* in ms */
+ struct wusb_ckhdid chid;
+ uint8_t phy_rate;
+ uint8_t dnts_num_slots;
+ uint8_t dnts_interval;
+ uint8_t retry_count;
+ struct wuie_host_info *wuie_host_info;
+
+ struct mutex mutex; /* locks everything else */
+ u16 cluster_id; /* Wireless USB Cluster ID */
+ struct wusb_port *port; /* Fake port status handling */
+ struct wusb_dev_info *dev_info; /* for Set Device Info mgmt */
+ u8 ports_max;
+ unsigned active:1; /* currently xmit'ing MMCs */
+ struct wuie_keep_alive keep_alive_ie; /* protected by mutex */
+ struct delayed_work keep_alive_timer;
+ struct list_head cack_list; /* Connect acknowledging */
+ size_t cack_count; /* protected by 'mutex' */
+ struct wuie_connect_ack cack_ie;
+ struct uwb_rsv *rsv; /* cluster bandwidth reservation */
+
+ struct mutex mmcie_mutex; /* MMC WUIE handling */
+ struct wuie_hdr **mmcie; /* WUIE array */
+ u8 mmcies_max;
+ /* FIXME: make wusbhc_ops? */
+ int (*start)(struct wusbhc *wusbhc);
+ void (*stop)(struct wusbhc *wusbhc, int delay);
+ int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
+ u8 handle, struct wuie_hdr *wuie);
+ int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle);
+ int (*dev_info_set)(struct wusbhc *, struct wusb_dev *wusb_dev);
+ int (*bwa_set)(struct wusbhc *wusbhc, s8 stream_index,
+ const struct uwb_mas_bm *);
+ int (*set_ptk)(struct wusbhc *wusbhc, u8 port_idx,
+ u32 tkid, const void *key, size_t key_size);
+ int (*set_gtk)(struct wusbhc *wusbhc,
+ u32 tkid, const void *key, size_t key_size);
+ int (*set_num_dnts)(struct wusbhc *wusbhc, u8 interval, u8 slots);
+
+ struct {
+ struct usb_key_descriptor descr;
+ u8 data[16]; /* GTK key data */
+ } __attribute__((packed)) gtk;
+ u8 gtk_index;
+ u32 gtk_tkid;
+
+ /* workqueue for WUSB security related tasks. */
+ struct workqueue_struct *wq_security;
+ struct work_struct gtk_rekey_work;
+
+ struct usb_encryption_descriptor *ccm1_etd;
+};
+
+#define usb_hcd_to_wusbhc(u) container_of((u), struct wusbhc, usb_hcd)
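+
+/*
+ * Illustrative sketch only, not part of the original driver: the
+ * minimum an upper layer is expected to fill in before calling
+ * wusbhc_create() (see the field documentation above). my_start,
+ * my_stop, my_mmcie_add and my_mmcie_rm are hypothetical HC-specific
+ * implementations.
+ */
+#if 0 /* usage sketch */
+static void my_wusbhc_setup(struct wusbhc *wusbhc, struct uwb_rc *uwb_rc)
+{
+ wusbhc->ports_max = 4; /* fake root hub ports offered */
+ wusbhc->mmcies_max = 16; /* MMC IE slots the HC provides */
+ wusbhc->start = my_start;
+ wusbhc->stop = my_stop;
+ wusbhc->mmcie_add = my_mmcie_add;
+ wusbhc->mmcie_rm = my_mmcie_rm;
+ wusbhc->uwb_rc = uwb_rc; /* already referenced by the caller */
+}
+#endif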
+
+
+extern int wusbhc_create(struct wusbhc *);
+extern int wusbhc_b_create(struct wusbhc *);
+extern void wusbhc_b_destroy(struct wusbhc *);
+extern void wusbhc_destroy(struct wusbhc *);
+extern int wusb_dev_sysfs_add(struct wusbhc *, struct usb_device *,
+ struct wusb_dev *);
+extern void wusb_dev_sysfs_rm(struct wusb_dev *);
+extern int wusbhc_sec_create(struct wusbhc *);
+extern int wusbhc_sec_start(struct wusbhc *);
+extern void wusbhc_sec_stop(struct wusbhc *);
+extern void wusbhc_sec_destroy(struct wusbhc *);
+extern void wusbhc_giveback_urb(struct wusbhc *wusbhc, struct urb *urb,
+ int status);
+void wusbhc_reset_all(struct wusbhc *wusbhc);
+
+int wusbhc_pal_register(struct wusbhc *wusbhc);
+void wusbhc_pal_unregister(struct wusbhc *wusbhc);
+
+/*
+ * Return @usb_dev's @usb_hcd (properly referenced) or NULL if gone
+ *
+ * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
+ *
+ * This is a safe assumption as @usb_dev->bus is referenced all the
+ * time during the @usb_dev life cycle.
+ */
+static inline
+struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
+{
+ struct usb_hcd *usb_hcd;
+ usb_hcd = bus_to_hcd(usb_dev->bus);
+ return usb_get_hcd(usb_hcd);
+}
+
+/*
+ * Increment the reference count on a wusbhc.
+ *
+ * @wusbhc's life cycle is identical to that of the underlying usb_hcd.
+ */
+static inline struct wusbhc *wusbhc_get(struct wusbhc *wusbhc)
+{
+ return usb_get_hcd(&wusbhc->usb_hcd) ? wusbhc : NULL;
+}
+
+/*
+ * Return the wusbhc associated to a @usb_dev
+ *
+ * @usb_dev: USB device, UNLOCKED and referenced (or otherwise, safe ptr)
+ *
+ * @returns: wusbhc for @usb_dev; NULL if the @usb_dev is being torn down.
+ * WARNING: referenced at the usb_hcd level, unlocked
+ *
+ * FIXME: move offline
+ */
+static inline struct wusbhc *wusbhc_get_by_usb_dev(struct usb_device *usb_dev)
+{
+ struct wusbhc *wusbhc = NULL;
+ struct usb_hcd *usb_hcd;
+ if (usb_dev->devnum > 1 && !usb_dev->wusb) {
+ /* anything but a root hub must be a WUSB device */
+ dev_err(&usb_dev->dev, "devnum %d wusb %d\n", usb_dev->devnum,
+ usb_dev->wusb);
+ BUG_ON(usb_dev->devnum > 1 && !usb_dev->wusb);
+ }
+ usb_hcd = usb_hcd_get_by_usb_dev(usb_dev);
+ if (usb_hcd == NULL)
+ return NULL;
+ BUG_ON(usb_hcd->wireless == 0);
+ return wusbhc = usb_hcd_to_wusbhc(usb_hcd);
+}
+
+
+static inline void wusbhc_put(struct wusbhc *wusbhc)
+{
+ usb_put_hcd(&wusbhc->usb_hcd);
+}
+
+int wusbhc_start(struct wusbhc *wusbhc);
+void wusbhc_stop(struct wusbhc *wusbhc);
+extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *);
+
+/* Device connect handling */
+extern int wusbhc_devconnect_create(struct wusbhc *);
+extern void wusbhc_devconnect_destroy(struct wusbhc *);
+extern int wusbhc_devconnect_start(struct wusbhc *wusbhc);
+extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc);
+extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr,
+ struct wusb_dn_hdr *dn_hdr, size_t size);
+extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port);
+extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val,
+ void *priv);
+extern int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
+ u8 addr);
+
+/* Wireless USB fake Root Hub methods */
+extern int wusbhc_rh_create(struct wusbhc *);
+extern void wusbhc_rh_destroy(struct wusbhc *);
+
+extern int wusbhc_rh_status_data(struct usb_hcd *, char *);
+extern int wusbhc_rh_control(struct usb_hcd *, u16, u16, u16, char *, u16);
+extern int wusbhc_rh_start_port_reset(struct usb_hcd *, unsigned);
+
+/* MMC handling */
+extern int wusbhc_mmcie_create(struct wusbhc *);
+extern void wusbhc_mmcie_destroy(struct wusbhc *);
+extern int wusbhc_mmcie_set(struct wusbhc *, u8 interval, u8 repeat_cnt,
+ struct wuie_hdr *);
+extern void wusbhc_mmcie_rm(struct wusbhc *, struct wuie_hdr *);
+
+/* Bandwidth reservation */
+int wusbhc_rsv_establish(struct wusbhc *wusbhc);
+void wusbhc_rsv_terminate(struct wusbhc *wusbhc);
+
+/*
+ * I've always said
+ * I wanted a wedding in a church...
+ *
+ * but lately I've been thinking about
+ * the Botanical Gardens.
+ *
+ * We could do it by the tulips.
+ * It'll be beautiful
+ *
+ * --Security!
+ */
+extern int wusb_dev_sec_add(struct wusbhc *, struct usb_device *,
+ struct wusb_dev *);
+extern void wusb_dev_sec_rm(struct wusb_dev *);
+extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *,
+ struct wusb_ckhdid *ck);
+void wusbhc_gtk_rekey(struct wusbhc *wusbhc);
+int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev);
+
+
+/* WUSB Cluster ID handling */
+extern u8 wusb_cluster_id_get(void);
+extern void wusb_cluster_id_put(u8);
+
+/*
+ * wusb_port_by_idx - return the port associated to a zero-based port index
+ *
+ * NOTE: valid without locking as long as wusbhc is referenced (as the
+ * number of ports doesn't change). The data pointed to has to
+ * be verified though :)
+ */
+static inline struct wusb_port *wusb_port_by_idx(struct wusbhc *wusbhc,
+ u8 port_idx)
+{
+ return &wusbhc->port[port_idx];
+}
+
+/*
+ * wusb_port_no_to_idx - Convert port number (per usb_dev->portnum) to
+ * a port_idx.
+ *
+ * USB stack port numbers are 1-based!
+ *
+ * NOTE: only valid for WUSB devices!!!
+ */
+static inline u8 wusb_port_no_to_idx(u8 port_no)
+{
+ return port_no - 1;
+}
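+
+/*
+ * Illustrative sketch only, not part of the original driver: going
+ * from a USB stack port number to the fake-port state kept in the
+ * wusbhc, combining the two helpers above.
+ */
+#if 0 /* usage sketch */
+static struct wusb_dev *example_port_lookup(struct wusbhc *wusbhc,
+ u8 port_no)
+{
+ u8 port_idx = wusb_port_no_to_idx(port_no); /* 1-based -> 0-based */
+
+ return wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
+}
+#endif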
+
+extern struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *,
+ struct usb_device *);
+
+/*
+ * Return a referenced wusb_dev given a @usb_dev
+ *
+ * Returns NULL if the usb_dev is being torn down.
+ *
+ * FIXME: move offline
+ */
+static inline
+struct wusb_dev *wusb_dev_get_by_usb_dev(struct usb_device *usb_dev)
+{
+ struct wusbhc *wusbhc;
+ struct wusb_dev *wusb_dev;
+ wusbhc = wusbhc_get_by_usb_dev(usb_dev);
+ if (wusbhc == NULL)
+ return NULL;
+ mutex_lock(&wusbhc->mutex);
+ wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev);
+ mutex_unlock(&wusbhc->mutex);
+ wusbhc_put(wusbhc);
+ return wusb_dev;
+}
+
+/* Misc */
+
+extern struct workqueue_struct *wusbd;
+#endif /* #ifndef __WUSBHC_H__ */