author      Alan Stern <stern@rowland.harvard.edu>    2005-12-17 17:58:46 -0500
committer   Greg Kroah-Hartman <gregkh@suse.de>       2006-03-20 14:49:57 -0800
commit      dccf4a48d47120a42382ba526f1a0848c13ba2a4 (patch)
tree        788a0a9f491d1a42df1dee1781156ccfc363b6ef /drivers/usb
parent      499003e815344304c7b0c93aad923ddf644d24e0 (diff)
download    linux-dccf4a48d47120a42382ba526f1a0848c13ba2a4.tar.bz2
[PATCH] UHCI: use one QH per endpoint, not per URB
This patch (as623) changes the uhci-hcd driver to make it use one QH per
device endpoint, instead of a QH per URB as it does now. Numerous areas
of the code are affected by this. For example, the distinction between
"queued" URBs and non-"queued" URBs no longer exists; all URBs belong to
a queue and some just happen to be at the queue's head.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
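To make the diff below easier to follow, here is a small, self-contained C model of the new per-endpoint QH lifecycle. It is not kernel code: the QH_STATE_* names and the activate/unlink/idle transitions mirror the patch, while the structures and helpers are simplified stand-ins for exposition.

/* Illustrative model of the per-endpoint QH lifecycle introduced by this
 * patch.  Not kernel code: the state names mirror the patch, everything
 * else is a simplification. */
#include <stdio.h>

enum qh_state { QH_STATE_IDLE = 1, QH_STATE_UNLINKING, QH_STATE_ACTIVE };

struct endpoint_qh {
	enum qh_state state;
	int queued_urbs;	/* length of qh->queue in the real driver */
};

/* Submitting an URB appends it to the endpoint's queue; the first URB on
 * an idle (or unlinked-but-not-yet-idle) QH puts the QH on the schedule. */
static void submit_urb(struct endpoint_qh *qh)
{
	qh->queued_urbs++;
	if (qh->state != QH_STATE_ACTIVE)
		qh->state = QH_STATE_ACTIVE;	/* cf. uhci_activate_qh() */
}

/* Completing the head URB just advances the queue; once the queue drains
 * the QH is unlinked from the hardware schedule. */
static void complete_head_urb(struct endpoint_qh *qh)
{
	if (qh->queued_urbs && --qh->queued_urbs == 0)
		qh->state = QH_STATE_UNLINKING;	/* cf. uhci_unlink_qh() */
}

/* One frame after unlinking, the hardware can no longer see the QH and it
 * becomes idle (and reusable, or freeable by endpoint_disable). */
static void frame_elapsed(struct endpoint_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		qh->state = QH_STATE_IDLE;	/* cf. uhci_make_qh_idle() */
}

int main(void)
{
	struct endpoint_qh qh = { QH_STATE_IDLE, 0 };

	submit_urb(&qh);
	submit_urb(&qh);	/* a "queued" URB: same QH, not a new one */
	complete_head_urb(&qh);
	complete_head_urb(&qh);
	frame_elapsed(&qh);
	printf("final state: %d (1 == QH_STATE_IDLE)\n", qh.state);
	return 0;
}

In this picture the old distinction between "queued" and non-"queued" URBs disappears: every URB lives on qh->queue, and being "queued" simply means not being at the head of that queue.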
Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/host/uhci-debug.c   320
-rw-r--r--  drivers/usb/host/uhci-hcd.c      65
-rw-r--r--  drivers/usb/host/uhci-hcd.h     177
-rw-r--r--  drivers/usb/host/uhci-q.c       985
4 files changed, 685 insertions, 862 deletions
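One detail that recurs throughout the hunks below is the renumbering of the skeleton QHs: skel_unlink_qh and skel_iso_qh take slots 0 and 1, so the interrupt, control, and bulk skeletons all shift by two and UHCI_NUM_SKELQH grows from 12 to 14. The following is a standalone copy of the patched __interval_to_skel() search tree with a small test harness added for reference; only the harness is new.

/* Copy of the remapped interval-to-skeleton search tree from the patched
 * uhci-hcd.h, with a trivial harness to print the mapping. */
#include <stdio.h>

static int interval_to_skel(int interval)
{
	if (interval < 16) {
		if (interval < 4) {
			if (interval < 2)
				return 9;	/* int1 for 0-1 ms */
			return 8;		/* int2 for 2-3 ms */
		}
		if (interval < 8)
			return 7;		/* int4 for 4-7 ms */
		return 6;			/* int8 for 8-15 ms */
	}
	if (interval < 64) {
		if (interval < 32)
			return 5;		/* int16 for 16-31 ms */
		return 4;			/* int32 for 32-63 ms */
	}
	if (interval < 128)
		return 3;			/* int64 for 64-127 ms */
	return 2;				/* int128 for 128-255 ms (Max.) */
}

int main(void)
{
	for (int ms = 1; ms <= 128; ms <<= 1)
		printf("interval %3d ms -> skelqh[%d]\n", ms,
		       interval_to_skel(ms));
	return 0;
}

In the real driver the caller is uhci_submit_interrupt(), which sets qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)]; the ffs-based frame-list setup in uhci_start() is adjusted to the same shifted indices (8 - __ffs(...), falling back to skelqh[9]).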
diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c index 5832953086f8..3faccbd68547 100644 --- a/drivers/usb/host/uhci-debug.c +++ b/drivers/usb/host/uhci-debug.c @@ -90,13 +90,60 @@ static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space) return out - buf; } -static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space) +static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space) { char *out = buf; - struct urb_priv *urbp; - struct list_head *head, *tmp; struct uhci_td *td; - int i = 0, checked = 0, prevactive = 0; + int i, nactive, ninactive; + + if (len < 200) + return 0; + + out += sprintf(out, "urb_priv [%p] ", urbp); + out += sprintf(out, "urb [%p] ", urbp->urb); + out += sprintf(out, "qh [%p] ", urbp->qh); + out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe)); + out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe), + (usb_pipein(urbp->urb->pipe) ? "IN" : "OUT")); + + switch (usb_pipetype(urbp->urb->pipe)) { + case PIPE_ISOCHRONOUS: out += sprintf(out, "ISO"); break; + case PIPE_INTERRUPT: out += sprintf(out, "INT"); break; + case PIPE_BULK: out += sprintf(out, "BLK"); break; + case PIPE_CONTROL: out += sprintf(out, "CTL"); break; + } + + out += sprintf(out, "%s", (urbp->fsbr ? " FSBR" : "")); + out += sprintf(out, "%s", (urbp->fsbr_timeout ? " FSBR_TO" : "")); + + if (urbp->urb->status != -EINPROGRESS) + out += sprintf(out, " Status=%d", urbp->urb->status); + out += sprintf(out, "\n"); + + i = nactive = ninactive = 0; + list_for_each_entry(td, &urbp->td_list, list) { + if (++i <= 10 || debug > 2) { + out += sprintf(out, "%*s%d: ", space + 2, "", i); + out += uhci_show_td(td, out, len - (out - buf), 0); + } else { + if (td_status(td) & TD_CTRL_ACTIVE) + ++nactive; + else + ++ninactive; + } + } + if (nactive + ninactive > 0) + out += sprintf(out, "%*s[skipped %d inactive and %d active " + "TDs]\n", + space, "", ninactive, nactive); + + return out - buf; +} + +static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space) +{ + char *out = buf; + int i, nurbs; __le32 element = qh_element(qh); /* Try to make sure there's enough memory */ @@ -118,86 +165,36 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space) if (!(element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH))) out += sprintf(out, "%*s Element is NULL (bug?)\n", space, ""); - if (!qh->urbp) { - out += sprintf(out, "%*s urbp == NULL\n", space, ""); - goto out; - } - - urbp = qh->urbp; - - head = &urbp->td_list; - tmp = head->next; - - td = list_entry(tmp, struct uhci_td, list); - - if (cpu_to_le32(td->dma_handle) != (element & ~UHCI_PTR_BITS)) - out += sprintf(out, "%*s Element != First TD\n", space, ""); - - while (tmp != head) { - struct uhci_td *td = list_entry(tmp, struct uhci_td, list); - - tmp = tmp->next; - - out += sprintf(out, "%*s%d: ", space + 2, "", i++); - out += uhci_show_td(td, out, len - (out - buf), 0); - - if (i > 10 && !checked && prevactive && tmp != head && - debug <= 2) { - struct list_head *ntmp = tmp; - struct uhci_td *ntd = td; - int active = 1, ni = i; - - checked = 1; - - while (ntmp != head && ntmp->next != head && active) { - ntd = list_entry(ntmp, struct uhci_td, list); - - ntmp = ntmp->next; - - active = td_status(ntd) & TD_CTRL_ACTIVE; - - ni++; - } - - if (active && ni > i) { - out += sprintf(out, "%*s[skipped %d active TDs]\n", space, "", ni - i); - tmp = ntmp; - td = ntd; - i = ni; - } + if (list_empty(&qh->queue)) { + out += sprintf(out, "%*s queue is empty\n", space, 
""); + } else { + struct urb_priv *urbp = list_entry(qh->queue.next, + struct urb_priv, node); + struct uhci_td *td = list_entry(urbp->td_list.next, + struct uhci_td, list); + + if (cpu_to_le32(td->dma_handle) != (element & ~UHCI_PTR_BITS)) + out += sprintf(out, "%*s Element != First TD\n", + space, ""); + i = nurbs = 0; + list_for_each_entry(urbp, &qh->queue, node) { + if (++i <= 10) + out += uhci_show_urbp(urbp, out, + len - (out - buf), space + 2); + else + ++nurbs; } - - prevactive = td_status(td) & TD_CTRL_ACTIVE; - } - - if (list_empty(&urbp->queue_list) || urbp->queued) - goto out; - - out += sprintf(out, "%*sQueued QHs:\n", -space, "--"); - - head = &urbp->queue_list; - tmp = head->next; - - while (tmp != head) { - struct urb_priv *nurbp = list_entry(tmp, struct urb_priv, - queue_list); - tmp = tmp->next; - - out += uhci_show_qh(nurbp->qh, out, len - (out - buf), space); + if (nurbs > 0) + out += sprintf(out, "%*s Skipped %d URBs\n", + space, "", nurbs); } -out: return out - buf; } -#define show_frame_num() \ - if (!shown) { \ - shown = 1; \ - out += sprintf(out, "- Frame %d\n", i); \ - } - #ifdef CONFIG_PROC_FS static const char * const qh_names[] = { + "skel_unlink_qh", "skel_iso_qh", "skel_int128_qh", "skel_int64_qh", "skel_int32_qh", "skel_int16_qh", "skel_int8_qh", "skel_int4_qh", @@ -206,12 +203,6 @@ static const char * const qh_names[] = { "skel_bulk_qh", "skel_term_qh" }; -#define show_qh_name() \ - if (!shown) { \ - shown = 1; \ - out += sprintf(out, "- %s\n", qh_names[i]); \ - } - static int uhci_show_sc(int port, unsigned short status, char *buf, int len) { char *out = buf; @@ -321,139 +312,29 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len) return out - buf; } -static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp, char *buf, int len) -{ - struct list_head *tmp; - char *out = buf; - int count = 0; - - if (len < 200) - return 0; - - out += sprintf(out, "urb_priv [%p] ", urbp); - out += sprintf(out, "urb [%p] ", urbp->urb); - out += sprintf(out, "qh [%p] ", urbp->qh); - out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe)); - out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe), (usb_pipein(urbp->urb->pipe) ? "IN" : "OUT")); - - switch (usb_pipetype(urbp->urb->pipe)) { - case PIPE_ISOCHRONOUS: out += sprintf(out, "ISO "); break; - case PIPE_INTERRUPT: out += sprintf(out, "INT "); break; - case PIPE_BULK: out += sprintf(out, "BLK "); break; - case PIPE_CONTROL: out += sprintf(out, "CTL "); break; - } - - out += sprintf(out, "%s", (urbp->fsbr ? "FSBR " : "")); - out += sprintf(out, "%s", (urbp->fsbr_timeout ? 
"FSBR_TO " : "")); - - if (urbp->urb->status != -EINPROGRESS) - out += sprintf(out, "Status=%d ", urbp->urb->status); - //out += sprintf(out, "FSBRtime=%lx ",urbp->fsbrtime); - - count = 0; - list_for_each(tmp, &urbp->td_list) - count++; - out += sprintf(out, "TDs=%d ",count); - - if (urbp->queued) - out += sprintf(out, "queued\n"); - else { - count = 0; - list_for_each(tmp, &urbp->queue_list) - count++; - out += sprintf(out, "queued URBs=%d\n", count); - } - - return out - buf; -} - -static int uhci_show_lists(struct uhci_hcd *uhci, char *buf, int len) -{ - char *out = buf; - struct list_head *head, *tmp; - int count; - - out += sprintf(out, "Main list URBs:"); - if (list_empty(&uhci->urb_list)) - out += sprintf(out, " Empty\n"); - else { - out += sprintf(out, "\n"); - count = 0; - head = &uhci->urb_list; - tmp = head->next; - while (tmp != head) { - struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list); - - out += sprintf(out, " %d: ", ++count); - out += uhci_show_urbp(uhci, urbp, out, len - (out - buf)); - tmp = tmp->next; - } - } - - out += sprintf(out, "Remove list URBs:"); - if (list_empty(&uhci->urb_remove_list)) - out += sprintf(out, " Empty\n"); - else { - out += sprintf(out, "\n"); - count = 0; - head = &uhci->urb_remove_list; - tmp = head->next; - while (tmp != head) { - struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list); - - out += sprintf(out, " %d: ", ++count); - out += uhci_show_urbp(uhci, urbp, out, len - (out - buf)); - tmp = tmp->next; - } - } - - out += sprintf(out, "Complete list URBs:"); - if (list_empty(&uhci->complete_list)) - out += sprintf(out, " Empty\n"); - else { - out += sprintf(out, "\n"); - count = 0; - head = &uhci->complete_list; - tmp = head->next; - while (tmp != head) { - struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list); - - out += sprintf(out, " %d: ", ++count); - out += uhci_show_urbp(uhci, urbp, out, len - (out - buf)); - tmp = tmp->next; - } - } - - return out - buf; -} - static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) { - unsigned long flags; char *out = buf; int i, j; struct uhci_qh *qh; struct uhci_td *td; struct list_head *tmp, *head; - spin_lock_irqsave(&uhci->lock, flags); - out += uhci_show_root_hub_state(uhci, out, len - (out - buf)); out += sprintf(out, "HC status\n"); out += uhci_show_status(uhci, out, len - (out - buf)); + if (debug <= 1) + return out - buf; out += sprintf(out, "Frame List\n"); for (i = 0; i < UHCI_NUMFRAMES; ++i) { - int shown = 0; td = uhci->frame_cpu[i]; if (!td) continue; - if (td->dma_handle != (dma_addr_t)uhci->frame[i]) { - show_frame_num(); + out += sprintf(out, "- Frame %d\n", i); \ + if (td->dma_handle != (dma_addr_t)uhci->frame[i]) out += sprintf(out, " frame list does not match td->dma_handle!\n"); - } - show_frame_num(); head = &td->fl_list; tmp = head; @@ -467,14 +348,11 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) out += sprintf(out, "Skeleton QHs\n"); for (i = 0; i < UHCI_NUM_SKELQH; ++i) { - int shown = 0; + int cnt = 0; qh = uhci->skelqh[i]; - - if (debug > 1) { - show_qh_name(); - out += uhci_show_qh(qh, out, len - (out - buf), 4); - } + out += sprintf(out, "- %s\n", qh_names[i]); \ + out += uhci_show_qh(qh, out, len - (out - buf), 4); /* Last QH is the Terminating QH, it's different */ if (i == UHCI_NUM_SKELQH - 1) { @@ -487,44 +365,27 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len) continue; } - j = (i < 7) ? 
7 : i+1; /* Next skeleton */ - if (list_empty(&qh->list)) { - if (i < UHCI_NUM_SKELQH - 1) { - if (qh->link != - (cpu_to_le32(uhci->skelqh[j]->dma_handle) | UHCI_PTR_QH)) { - show_qh_name(); - out += sprintf(out, " skeleton QH not linked to next skeleton QH!\n"); - } - } - - continue; - } - - show_qh_name(); - - head = &qh->list; + j = (i < 9) ? 9 : i+1; /* Next skeleton */ + head = &qh->node; tmp = head->next; while (tmp != head) { - qh = list_entry(tmp, struct uhci_qh, list); - + qh = list_entry(tmp, struct uhci_qh, node); tmp = tmp->next; - - out += uhci_show_qh(qh, out, len - (out - buf), 4); + if (++cnt <= 10) + out += uhci_show_qh(qh, out, + len - (out - buf), 4); } + if ((cnt -= 10) > 0) + out += sprintf(out, " Skipped %d QHs\n", cnt); - if (i < UHCI_NUM_SKELQH - 1) { + if (i > 1 && i < UHCI_NUM_SKELQH - 1) { if (qh->link != (cpu_to_le32(uhci->skelqh[j]->dma_handle) | UHCI_PTR_QH)) out += sprintf(out, " last QH not linked to next skeleton!\n"); } } - if (debug > 2) - out += uhci_show_lists(uhci, out, len - (out - buf)); - - spin_unlock_irqrestore(&uhci->lock, flags); - return out - buf; } @@ -541,6 +402,7 @@ static int uhci_debug_open(struct inode *inode, struct file *file) struct uhci_hcd *uhci = inode->u.generic_ip; struct uhci_debug *up; int ret = -ENOMEM; + unsigned long flags; lock_kernel(); up = kmalloc(sizeof(*up), GFP_KERNEL); @@ -553,7 +415,9 @@ static int uhci_debug_open(struct inode *inode, struct file *file) goto out; } + spin_lock_irqsave(&uhci->lock, flags); up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT); + spin_unlock_irqrestore(&uhci->lock, flags); file->private_data = up; diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index dfe121d35887..1ff4b8806372 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -54,7 +54,7 @@ /* * Version Information */ -#define DRIVER_VERSION "v2.3" +#define DRIVER_VERSION "v3.0" #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \ Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \ Alan Stern" @@ -489,15 +489,11 @@ static int uhci_start(struct usb_hcd *hcd) uhci->fsbrtimeout = 0; spin_lock_init(&uhci->lock); - INIT_LIST_HEAD(&uhci->qh_remove_list); INIT_LIST_HEAD(&uhci->td_remove_list); - - INIT_LIST_HEAD(&uhci->urb_remove_list); - INIT_LIST_HEAD(&uhci->urb_list); - INIT_LIST_HEAD(&uhci->complete_list); + INIT_LIST_HEAD(&uhci->idle_qh_list); init_waitqueue_head(&uhci->waitqh); @@ -540,7 +536,7 @@ static int uhci_start(struct usb_hcd *hcd) } for (i = 0; i < UHCI_NUM_SKELQH; i++) { - uhci->skelqh[i] = uhci_alloc_qh(uhci); + uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL); if (!uhci->skelqh[i]) { dev_err(uhci_dev(uhci), "unable to allocate QH\n"); goto err_alloc_skelqh; @@ -557,13 +553,17 @@ static int uhci_start(struct usb_hcd *hcd) uhci->skel_int16_qh->link = uhci->skel_int8_qh->link = uhci->skel_int4_qh->link = - uhci->skel_int2_qh->link = - cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH; - uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH; - - uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH; - uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH; - uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH; + uhci->skel_int2_qh->link = UHCI_PTR_QH | + cpu_to_le32(uhci->skel_int1_qh->dma_handle); + + uhci->skel_int1_qh->link = UHCI_PTR_QH | + 
cpu_to_le32(uhci->skel_ls_control_qh->dma_handle); + uhci->skel_ls_control_qh->link = UHCI_PTR_QH | + cpu_to_le32(uhci->skel_fs_control_qh->dma_handle); + uhci->skel_fs_control_qh->link = UHCI_PTR_QH | + cpu_to_le32(uhci->skel_bulk_qh->dma_handle); + uhci->skel_bulk_qh->link = UHCI_PTR_QH | + cpu_to_le32(uhci->skel_term_qh->dma_handle); /* This dummy TD is to work around a bug in Intel PIIX controllers */ uhci_fill_td(uhci->term_td, 0, uhci_explen(0) | @@ -589,15 +589,15 @@ static int uhci_start(struct usb_hcd *hcd) /* * ffs (Find First bit Set) does exactly what we need: - * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[6], - * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc. - * ffs > 6 => not on any high-period queue, so use - * skel_int1_qh = skelqh[7]. + * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[8], + * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc. + * ffs >= 7 => not on any high-period queue, so use + * skel_int1_qh = skelqh[9]. * Add UHCI_NUMFRAMES to insure at least one bit is set. */ - irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES); - if (irq < 0) - irq = 7; + irq = 8 - (int) __ffs(i + UHCI_NUMFRAMES); + if (irq <= 1) + irq = 9; /* Only place we don't use the frame list routines */ uhci->frame[i] = UHCI_PTR_QH | @@ -767,13 +767,30 @@ static int uhci_resume(struct usb_hcd *hcd) } #endif -/* Wait until all the URBs for a particular device/endpoint are gone */ +/* Wait until a particular device/endpoint's QH is idle, and free it */ static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd, - struct usb_host_endpoint *ep) + struct usb_host_endpoint *hep) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); + struct uhci_qh *qh; + + spin_lock_irq(&uhci->lock); + qh = (struct uhci_qh *) hep->hcpriv; + if (qh == NULL) + goto done; - wait_event_interruptible(uhci->waitqh, list_empty(&ep->urb_list)); + while (qh->state != QH_STATE_IDLE) { + ++uhci->num_waiting; + spin_unlock_irq(&uhci->lock); + wait_event_interruptible(uhci->waitqh, + qh->state == QH_STATE_IDLE); + spin_lock_irq(&uhci->lock); + --uhci->num_waiting; + } + + uhci_free_qh(uhci, qh); +done: + spin_unlock_irq(&uhci->lock); } static int uhci_hcd_get_frame_number(struct usb_hcd *hcd) diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h index 8b4b887a7d41..7a9481c09a05 100644 --- a/drivers/usb/host/uhci-hcd.h +++ b/drivers/usb/host/uhci-hcd.h @@ -28,8 +28,9 @@ #define USBSTS_USBINT 0x0001 /* Interrupt due to IOC */ #define USBSTS_ERROR 0x0002 /* Interrupt due to error */ #define USBSTS_RD 0x0004 /* Resume Detect */ -#define USBSTS_HSE 0x0008 /* Host System Error - basically PCI problems */ -#define USBSTS_HCPE 0x0010 /* Host Controller Process Error - the scripts were buggy */ +#define USBSTS_HSE 0x0008 /* Host System Error: PCI problems */ +#define USBSTS_HCPE 0x0010 /* Host Controller Process Error: + * the schedule is buggy */ #define USBSTS_HCH 0x0020 /* HC Halted */ /* Interrupt enable register */ @@ -47,7 +48,8 @@ /* USB port status and control registers */ #define USBPORTSC1 16 #define USBPORTSC2 18 -#define USBPORTSC_CCS 0x0001 /* Current Connect Status ("device present") */ +#define USBPORTSC_CCS 0x0001 /* Current Connect Status + * ("device present") */ #define USBPORTSC_CSC 0x0002 /* Connect Status Change */ #define USBPORTSC_PE 0x0004 /* Port Enable */ #define USBPORTSC_PEC 0x0008 /* Port Enable Change */ @@ -71,15 +73,16 @@ #define USBLEGSUP_RWC 0x8f00 /* the R/WC bits */ #define USBLEGSUP_RO 0x5040 /* R/O and reserved bits */ -#define UHCI_PTR_BITS cpu_to_le32(0x000F) 
-#define UHCI_PTR_TERM cpu_to_le32(0x0001) -#define UHCI_PTR_QH cpu_to_le32(0x0002) -#define UHCI_PTR_DEPTH cpu_to_le32(0x0004) -#define UHCI_PTR_BREADTH cpu_to_le32(0x0000) +#define UHCI_PTR_BITS __constant_cpu_to_le32(0x000F) +#define UHCI_PTR_TERM __constant_cpu_to_le32(0x0001) +#define UHCI_PTR_QH __constant_cpu_to_le32(0x0002) +#define UHCI_PTR_DEPTH __constant_cpu_to_le32(0x0004) +#define UHCI_PTR_BREADTH __constant_cpu_to_le32(0x0000) #define UHCI_NUMFRAMES 1024 /* in the frame list [array] */ #define UHCI_MAX_SOF_NUMBER 2047 /* in an SOF packet */ -#define CAN_SCHEDULE_FRAMES 1000 /* how far future frames can be scheduled */ +#define CAN_SCHEDULE_FRAMES 1000 /* how far in the future frames + * can be scheduled */ /* @@ -87,38 +90,54 @@ */ /* - * One role of a QH is to hold a queue of TDs for some endpoint. Each QH is - * used with one URB, and qh->element (updated by the HC) is either: - * - the next unprocessed TD for the URB, or - * - UHCI_PTR_TERM (when there's no more traffic for this endpoint), or - * - the QH for the next URB queued to the same endpoint. + * One role of a QH is to hold a queue of TDs for some endpoint. One QH goes + * with each endpoint, and qh->element (updated by the HC) is either: + * - the next unprocessed TD in the endpoint's queue, or + * - UHCI_PTR_TERM (when there's no more traffic for this endpoint). * * The other role of a QH is to serve as a "skeleton" framelist entry, so we * can easily splice a QH for some endpoint into the schedule at the right * place. Then qh->element is UHCI_PTR_TERM. * - * In the frame list, qh->link maintains a list of QHs seen by the HC: + * In the schedule, qh->link maintains a list of QHs seen by the HC: * skel1 --> ep1-qh --> ep2-qh --> ... --> skel2 --> ... + * + * qh->node is the software equivalent of qh->link. The differences + * are that the software list is doubly-linked and QHs in the UNLINKING + * state are on the software list but not the hardware schedule. + * + * For bookkeeping purposes we maintain QHs even for Isochronous endpoints, + * but they never get added to the hardware schedule. */ +#define QH_STATE_IDLE 1 /* QH is not being used */ +#define QH_STATE_UNLINKING 2 /* QH has been removed from the + * schedule but the hardware may + * still be using it */ +#define QH_STATE_ACTIVE 3 /* QH is on the schedule */ + struct uhci_qh { /* Hardware fields */ - __le32 link; /* Next queue */ - __le32 element; /* Queue element pointer */ + __le32 link; /* Next QH in the schedule */ + __le32 element; /* Queue element (TD) pointer */ /* Software fields */ dma_addr_t dma_handle; - struct urb_priv *urbp; + struct list_head node; /* Node in the list of QHs */ + struct usb_host_endpoint *hep; /* Endpoint information */ + struct usb_device *udev; + struct list_head queue; /* Queue of urbps for this QH */ + struct uhci_qh *skel; /* Skeleton for this QH */ - struct list_head list; - struct list_head remove_list; + unsigned int unlink_frame; /* When the QH was unlinked */ + int state; /* QH_STATE_xxx; see above */ } __attribute__((aligned(16))); /* * We need a special accessor for the element pointer because it is * subject to asynchronous updates by the controller. 
*/ -static __le32 inline qh_element(struct uhci_qh *qh) { +static inline __le32 qh_element(struct uhci_qh *qh) { __le32 element = qh->element; barrier(); @@ -149,11 +168,13 @@ static __le32 inline qh_element(struct uhci_qh *qh) { #define TD_CTRL_ACTLEN_MASK 0x7FF /* actual length, encoded as n - 1 */ #define TD_CTRL_ANY_ERROR (TD_CTRL_STALLED | TD_CTRL_DBUFERR | \ - TD_CTRL_BABBLE | TD_CTRL_CRCTIME | TD_CTRL_BITSTUFF) + TD_CTRL_BABBLE | TD_CTRL_CRCTIME | \ + TD_CTRL_BITSTUFF) #define uhci_maxerr(err) ((err) << TD_CTRL_C_ERR_SHIFT) #define uhci_status_bits(ctrl_sts) ((ctrl_sts) & 0xF60000) -#define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & TD_CTRL_ACTLEN_MASK) /* 1-based */ +#define uhci_actual_length(ctrl_sts) (((ctrl_sts) + 1) & \ + TD_CTRL_ACTLEN_MASK) /* 1-based */ /* * for TD <info>: (a.k.a. Token) @@ -163,7 +184,7 @@ static __le32 inline qh_element(struct uhci_qh *qh) { #define TD_TOKEN_TOGGLE_SHIFT 19 #define TD_TOKEN_TOGGLE (1 << 19) #define TD_TOKEN_EXPLEN_SHIFT 21 -#define TD_TOKEN_EXPLEN_MASK 0x7FF /* expected length, encoded as n - 1 */ +#define TD_TOKEN_EXPLEN_MASK 0x7FF /* expected length, encoded as n-1 */ #define TD_TOKEN_PID_MASK 0xFF #define uhci_explen(len) ((((len) - 1) & TD_TOKEN_EXPLEN_MASK) << \ @@ -187,7 +208,7 @@ static __le32 inline qh_element(struct uhci_qh *qh) { * sw space after the TD entry. * * td->link points to either another TD (not necessarily for the same urb or - * even the same endpoint), or nothing (PTR_TERM), or a QH (for queued urbs). + * even the same endpoint), or nothing (PTR_TERM), or a QH. */ struct uhci_td { /* Hardware fields */ @@ -210,7 +231,7 @@ struct uhci_td { * We need a special accessor for the control/status word because it is * subject to asynchronous updates by the controller. */ -static u32 inline td_status(struct uhci_td *td) { +static inline u32 td_status(struct uhci_td *td) { __le32 status = td->status; barrier(); @@ -223,17 +244,14 @@ static u32 inline td_status(struct uhci_td *td) { */ /* - * The UHCI driver places Interrupt, Control and Bulk into QHs both - * to group together TDs for one transfer, and also to facilitate queuing - * of URBs. To make it easy to insert entries into the schedule, we have - * a skeleton of QHs for each predefined Interrupt latency, low-speed - * control, full-speed control and terminating QH (see explanation for - * the terminating QH below). + * The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for + * automatic queuing. To make it easy to insert entries into the schedule, + * we have a skeleton of QHs for each predefined Interrupt latency, + * low-speed control, full-speed control, bulk, and terminating QH + * (see explanation for the terminating QH below). * * When we want to add a new QH, we add it to the end of the list for the - * skeleton QH. - * - * For instance, the queue can look like this: + * skeleton QH. For instance, the schedule list can look like this: * * skel int128 QH * dev 1 interrupt QH @@ -256,26 +274,31 @@ static u32 inline td_status(struct uhci_td *td) { * - To loop back to the full-speed control queue for full-speed bandwidth * reclamation. * - * Isochronous transfers are stored before the start of the skeleton - * schedule and don't use QHs. While the UHCI spec doesn't forbid the - * use of QHs for Isochronous, it doesn't use them either. And the spec - * says that queues never advance on an error completion status, which - * makes them totally unsuitable for Isochronous transfers. + * There's a special skeleton QH for Isochronous QHs. 
It never appears + * on the schedule, and Isochronous TDs go on the schedule before the + * the skeleton QHs. The hardware accesses them directly rather than + * through their QH, which is used only for bookkeeping purposes. + * While the UHCI spec doesn't forbid the use of QHs for Isochronous, + * it doesn't use them either. And the spec says that queues never + * advance on an error completion status, which makes them totally + * unsuitable for Isochronous transfers. */ -#define UHCI_NUM_SKELQH 12 -#define skel_int128_qh skelqh[0] -#define skel_int64_qh skelqh[1] -#define skel_int32_qh skelqh[2] -#define skel_int16_qh skelqh[3] -#define skel_int8_qh skelqh[4] -#define skel_int4_qh skelqh[5] -#define skel_int2_qh skelqh[6] -#define skel_int1_qh skelqh[7] -#define skel_ls_control_qh skelqh[8] -#define skel_fs_control_qh skelqh[9] -#define skel_bulk_qh skelqh[10] -#define skel_term_qh skelqh[11] +#define UHCI_NUM_SKELQH 14 +#define skel_unlink_qh skelqh[0] +#define skel_iso_qh skelqh[1] +#define skel_int128_qh skelqh[2] +#define skel_int64_qh skelqh[3] +#define skel_int32_qh skelqh[4] +#define skel_int16_qh skelqh[5] +#define skel_int8_qh skelqh[6] +#define skel_int4_qh skelqh[7] +#define skel_int2_qh skelqh[8] +#define skel_int1_qh skelqh[9] +#define skel_ls_control_qh skelqh[10] +#define skel_fs_control_qh skelqh[11] +#define skel_bulk_qh skelqh[12] +#define skel_term_qh skelqh[13] /* * Search tree for determining where <interval> fits in the skelqh[] @@ -293,21 +316,21 @@ static inline int __interval_to_skel(int interval) if (interval < 16) { if (interval < 4) { if (interval < 2) - return 7; /* int1 for 0-1 ms */ - return 6; /* int2 for 2-3 ms */ + return 9; /* int1 for 0-1 ms */ + return 8; /* int2 for 2-3 ms */ } if (interval < 8) - return 5; /* int4 for 4-7 ms */ - return 4; /* int8 for 8-15 ms */ + return 7; /* int4 for 4-7 ms */ + return 6; /* int8 for 8-15 ms */ } if (interval < 64) { if (interval < 32) - return 3; /* int16 for 16-31 ms */ - return 2; /* int32 for 32-63 ms */ + return 5; /* int16 for 16-31 ms */ + return 4; /* int32 for 32-63 ms */ } if (interval < 128) - return 1; /* int64 for 64-127 ms */ - return 0; /* int128 for 128-255 ms (Max.) */ + return 3; /* int64 for 64-127 ms */ + return 2; /* int128 for 128-255 ms (Max.) 
*/ } @@ -363,12 +386,12 @@ struct uhci_hcd { spinlock_t lock; - dma_addr_t frame_dma_handle; /* Hardware frame list */ + dma_addr_t frame_dma_handle; /* Hardware frame list */ __le32 *frame; - void **frame_cpu; /* CPU's frame list */ + void **frame_cpu; /* CPU's frame list */ - int fsbr; /* Full-speed bandwidth reclamation */ - unsigned long fsbrtimeout; /* FSBR delay */ + int fsbr; /* Full-speed bandwidth reclamation */ + unsigned long fsbrtimeout; /* FSBR delay */ enum uhci_rh_state rh_state; unsigned long auto_stop_time; /* When to AUTO_STOP */ @@ -392,24 +415,19 @@ struct uhci_hcd { /* Main list of URBs currently controlled by this HC */ struct list_head urb_list; - /* List of QHs that are done, but waiting to be unlinked (race) */ - struct list_head qh_remove_list; - unsigned int qh_remove_age; /* Age in frames */ - /* List of TDs that are done, but waiting to be freed (race) */ struct list_head td_remove_list; unsigned int td_remove_age; /* Age in frames */ - /* List of asynchronously unlinked URBs */ - struct list_head urb_remove_list; - unsigned int urb_remove_age; /* Age in frames */ - /* List of URBs awaiting completion callback */ struct list_head complete_list; + struct list_head idle_qh_list; /* Where the idle QHs live */ + int rh_numports; /* Number of root-hub ports */ wait_queue_head_t waitqh; /* endpoint_disable waiters */ + int num_waiting; /* Number of waiters */ }; /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */ @@ -430,22 +448,19 @@ static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci) */ struct urb_priv { struct list_head urb_list; + struct list_head node; /* Node in the QH's urbp list */ struct urb *urb; struct uhci_qh *qh; /* QH for this URB */ struct list_head td_list; - unsigned fsbr : 1; /* URB turned on FSBR */ - unsigned fsbr_timeout : 1; /* URB timed out on FSBR */ - unsigned queued : 1; /* QH was queued (not linked in) */ - unsigned short_control_packet : 1; /* If we get a short packet during */ - /* a control transfer, retrigger */ - /* the status phase */ - unsigned long fsbrtime; /* In jiffies */ - struct list_head queue_list; + unsigned fsbr : 1; /* URB turned on FSBR */ + unsigned fsbr_timeout : 1; /* URB timed out on FSBR */ + unsigned short_transfer : 1; /* URB got a short transfer, no + * need to rescan */ }; diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 782398045f9f..b1b551a3d14e 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -13,13 +13,9 @@ * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) - * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu + * (C) Copyright 2004-2005 Alan Stern, stern@rowland.harvard.edu */ -static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb); -static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb); -static void uhci_remove_pending_urbps(struct uhci_hcd *uhci); -static void uhci_free_pending_qhs(struct uhci_hcd *uhci); static void uhci_free_pending_tds(struct uhci_hcd *uhci); /* @@ -30,7 +26,7 @@ static void uhci_free_pending_tds(struct uhci_hcd *uhci); * games with the FSBR code to make sure we get the correct order in all * the cases. 
I don't think it's worth the effort */ -static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci) +static void uhci_set_next_interrupt(struct uhci_hcd *uhci) { if (uhci->is_stopped) mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies); @@ -42,12 +38,6 @@ static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci) uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC); } -static inline void uhci_moveto_complete(struct uhci_hcd *uhci, - struct urb_priv *urbp) -{ - list_move_tail(&urbp->urb_list, &uhci->complete_list); -} - static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci) { dma_addr_t dma_handle; @@ -71,6 +61,18 @@ static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci) return td; } +static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td) +{ + if (!list_empty(&td->list)) + dev_warn(uhci_dev(uhci), "td %p still in list!\n", td); + if (!list_empty(&td->remove_list)) + dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td); + if (!list_empty(&td->fl_list)) + dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td); + + dma_pool_free(uhci->td_pool, td, td->dma_handle); +} + static inline void uhci_fill_td(struct uhci_td *td, u32 status, u32 token, u32 buffer) { @@ -82,7 +84,8 @@ static inline void uhci_fill_td(struct uhci_td *td, u32 status, /* * We insert Isochronous URBs directly into the frame list at the beginning */ -static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum) +static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci, + struct uhci_td *td, unsigned framenum) { framenum &= (UHCI_NUMFRAMES - 1); @@ -108,7 +111,7 @@ static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, } } -static inline void uhci_remove_td_frame_list(struct uhci_hcd *uhci, +static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci, struct uhci_td *td) { /* If it's not inserted, don't remove it */ @@ -139,48 +142,68 @@ static inline void uhci_remove_td_frame_list(struct uhci_hcd *uhci, td->frame = -1; } -static void unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb) +/* + * Remove all the TDs for an Isochronous URB from the frame list + */ +static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb) { struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; struct uhci_td *td; list_for_each_entry(td, &urbp->td_list, list) - uhci_remove_td_frame_list(uhci, td); + uhci_remove_td_from_frame_list(uhci, td); wmb(); } /* - * Inserts a td list into qh. 
+ * Remove an URB's TDs from the hardware schedule */ -static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth) +static void uhci_remove_tds_from_schedule(struct uhci_hcd *uhci, + struct urb *urb, int status) { - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - struct uhci_td *td; - __le32 *plink; + struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; - /* Ordering isn't important here yet since the QH hasn't been */ - /* inserted into the schedule yet */ - plink = &qh->element; - list_for_each_entry(td, &urbp->td_list, list) { - *plink = cpu_to_le32(td->dma_handle) | breadth; - plink = &td->link; + /* Isochronous TDs get unlinked directly from the frame list */ + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + uhci_unlink_isochronous_tds(uhci, urb); + return; } - *plink = UHCI_PTR_TERM; -} -static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td) -{ - if (!list_empty(&td->list)) - dev_warn(uhci_dev(uhci), "td %p still in list!\n", td); - if (!list_empty(&td->remove_list)) - dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td); - if (!list_empty(&td->fl_list)) - dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td); + /* If the URB isn't first on its queue, adjust the link pointer + * of the last TD in the previous URB. */ + if (urbp->node.prev != &urbp->qh->queue) { + struct urb_priv *purbp; + struct uhci_td *ptd, *ltd; + + if (status == -EINPROGRESS) + status = 0; + purbp = list_entry(urbp->node.prev, struct urb_priv, node); + ptd = list_entry(purbp->td_list.prev, struct uhci_td, + list); + ltd = list_entry(urbp->td_list.prev, struct uhci_td, + list); + ptd->link = ltd->link; + } - dma_pool_free(uhci->td_pool, td, td->dma_handle); + /* If the URB completed with an error, then the QH element certainly + * points to one of the URB's TDs. If it completed normally then + * the QH element has certainly moved on to the next URB. And if + * the URB is still in progress then it must have been dequeued. + * The QH element either hasn't reached it yet or is somewhere in + * the middle. If the URB wasn't first we can assume that it + * hasn't started yet (see above): Otherwise all the preceding URBs + * would have completed and been removed from the queue, so this one + * _would_ be first. + * + * If the QH element is inside this URB, clear it. It will be + * set properly when the QH is activated. 
+ */ + if (status < 0) + urbp->qh->element = UHCI_PTR_TERM; } -static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci) +static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, + struct usb_device *udev, struct usb_host_endpoint *hep) { dma_addr_t dma_handle; struct uhci_qh *qh; @@ -194,256 +217,120 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci) qh->element = UHCI_PTR_TERM; qh->link = UHCI_PTR_TERM; - qh->urbp = NULL; + INIT_LIST_HEAD(&qh->queue); + INIT_LIST_HEAD(&qh->node); - INIT_LIST_HEAD(&qh->list); - INIT_LIST_HEAD(&qh->remove_list); + if (udev) { /* Normal QH */ + qh->state = QH_STATE_IDLE; + qh->hep = hep; + qh->udev = udev; + hep->hcpriv = qh; + usb_get_dev(udev); + } else { /* Skeleton QH */ + qh->state = QH_STATE_ACTIVE; + qh->udev = NULL; + } return qh; } static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { - if (!list_empty(&qh->list)) + WARN_ON(qh->state != QH_STATE_IDLE && qh->udev); + if (!list_empty(&qh->queue)) dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh); - if (!list_empty(&qh->remove_list)) - dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh); + list_del(&qh->node); + if (qh->udev) { + qh->hep->hcpriv = NULL; + usb_put_dev(qh->udev); + } dma_pool_free(uhci->qh_pool, qh, qh->dma_handle); } /* - * Append this urb's qh after the last qh in skelqh->list - * - * Note that urb_priv.queue_list doesn't have a separate queue head; - * it's a ring with every element "live". + * Put a QH on the schedule in both hardware and software */ -static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb) +static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - struct urb_priv *turbp; - struct uhci_qh *lqh; + struct uhci_qh *pqh; - /* Grab the last QH */ - lqh = list_entry(skelqh->list.prev, struct uhci_qh, list); + WARN_ON(list_empty(&qh->queue)); - /* Point to the next skelqh */ - urbp->qh->link = lqh->link; - wmb(); /* Ordering is important */ + /* Set the element pointer if it isn't set already. + * This isn't needed for Isochronous queues, but it doesn't hurt. */ + if (qh_element(qh) == UHCI_PTR_TERM) { + struct urb_priv *urbp = list_entry(qh->queue.next, + struct urb_priv, node); + struct uhci_td *td = list_entry(urbp->td_list.next, + struct uhci_td, list); - /* - * Patch QHs for previous endpoint's queued URBs? HC goes - * here next, not to the next skelqh it now points to. - * - * lqh --> td ... --> qh ... --> td --> qh ... --> td - * | | | - * v v v - * +<----------------+-----------------+ - * v - * newqh --> td ... --> td - * | - * v - * ... - * - * The HC could see (and use!) any of these as we write them. - */ - lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH; - if (lqh->urbp) { - list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list) - turbp->qh->link = lqh->link; + qh->element = cpu_to_le32(td->dma_handle); } - list_add_tail(&urbp->qh->list, &skelqh->list); + if (qh->state == QH_STATE_ACTIVE) + return; + qh->state = QH_STATE_ACTIVE; + + /* Move the QH from its old list to the end of the appropriate + * skeleton's list */ + list_move_tail(&qh->node, &qh->skel->node); + + /* Link it into the schedule */ + pqh = list_entry(qh->node.prev, struct uhci_qh, node); + qh->link = pqh->link; + wmb(); + pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle); } /* - * Start removal of QH from schedule; it finishes next frame. - * TDs should be unlinked before this is called. 
+ * Take a QH off the hardware schedule */ -static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) +static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct uhci_qh *pqh; - __le32 newlink; - if (!qh) + if (qh->state == QH_STATE_UNLINKING) return; + WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev); + qh->state = QH_STATE_UNLINKING; - /* - * Only go through the hoops if it's actually linked in - */ - if (!list_empty(&qh->list)) { - - /* If our queue is nonempty, make the next URB the head */ - if (!list_empty(&qh->urbp->queue_list)) { - struct urb_priv *nurbp; - - nurbp = list_entry(qh->urbp->queue_list.next, - struct urb_priv, queue_list); - nurbp->queued = 0; - list_add(&nurbp->qh->list, &qh->list); - newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH; - } else - newlink = qh->link; - - /* Fix up the previous QH's queue to link to either - * the new head of this queue or the start of the - * next endpoint's queue. */ - pqh = list_entry(qh->list.prev, struct uhci_qh, list); - pqh->link = newlink; - if (pqh->urbp) { - struct urb_priv *turbp; - - list_for_each_entry(turbp, &pqh->urbp->queue_list, - queue_list) - turbp->qh->link = newlink; - } - wmb(); - - /* Leave qh->link in case the HC is on the QH now, it will */ - /* continue the rest of the schedule */ - qh->element = UHCI_PTR_TERM; - - list_del_init(&qh->list); - } - - list_del_init(&qh->urbp->queue_list); - qh->urbp = NULL; + /* Unlink the QH from the schedule and record when we did it */ + pqh = list_entry(qh->node.prev, struct uhci_qh, node); + pqh->link = qh->link; + mb(); uhci_get_current_frame_number(uhci); - if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age) { - uhci_free_pending_qhs(uhci); - uhci->qh_remove_age = uhci->frame_number; - } + qh->unlink_frame = uhci->frame_number; - /* Check to see if the remove list is empty. Set the IOC bit */ - /* to force an interrupt so we can remove the QH */ - if (list_empty(&uhci->qh_remove_list)) + /* Force an interrupt so we know when the QH is fully unlinked */ + if (list_empty(&uhci->skel_unlink_qh->node)) uhci_set_next_interrupt(uhci); - list_add(&qh->remove_list, &uhci->qh_remove_list); + /* Move the QH from its old list to the end of the unlinking list */ + list_move_tail(&qh->node, &uhci->skel_unlink_qh->node); } -static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle) -{ - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; - struct uhci_td *td; - - list_for_each_entry(td, &urbp->td_list, list) { - if (toggle) - td->token |= cpu_to_le32(TD_TOKEN_TOGGLE); - else - td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE); - - toggle ^= 1; - } - - return toggle; -} - -/* This function will append one URB's QH to another URB's QH. 
This is for */ -/* queuing interrupt, control or bulk transfers */ -static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb) -{ - struct urb_priv *eurbp, *urbp, *furbp, *lurbp; - struct uhci_td *lltd; - - eurbp = eurb->hcpriv; - urbp = urb->hcpriv; - - /* Find the first URB in the queue */ - furbp = eurbp; - if (eurbp->queued) { - list_for_each_entry(furbp, &eurbp->queue_list, queue_list) - if (!furbp->queued) - break; - } - - lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list); - - lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list); - - /* Control transfers always start with toggle 0 */ - if (!usb_pipecontrol(urb->pipe)) - usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe), - uhci_fixup_toggle(urb, - uhci_toggle(td_token(lltd)) ^ 1)); - - /* All qhs in the queue need to link to the next queue */ - urbp->qh->link = eurbp->qh->link; - - wmb(); /* Make sure we flush everything */ - - lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH; - - list_add_tail(&urbp->queue_list, &furbp->queue_list); - - urbp->queued = 1; -} - -static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb) +/* + * When we and the controller are through with a QH, it becomes IDLE. + * This happens when a QH has been off the schedule (on the unlinking + * list) for more than one frame, or when an error occurs while adding + * the first URB onto a new QH. + */ +static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh) { - struct urb_priv *urbp, *nurbp, *purbp, *turbp; - struct uhci_td *pltd; - unsigned int toggle; - - urbp = urb->hcpriv; - - if (list_empty(&urbp->queue_list)) - return; - - nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list); - - /* - * Fix up the toggle for the following URBs in the queue. - * Only needed for bulk and interrupt: control and isochronous - * endpoints don't propagate toggles between messages. - */ - if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) { - if (!urbp->queued) - /* We just set the toggle in uhci_unlink_generic */ - toggle = usb_gettoggle(urb->dev, - usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe)); - else { - /* If we're in the middle of the queue, grab the */ - /* toggle from the TD previous to us */ - purbp = list_entry(urbp->queue_list.prev, - struct urb_priv, queue_list); - pltd = list_entry(purbp->td_list.prev, - struct uhci_td, list); - toggle = uhci_toggle(td_token(pltd)) ^ 1; - } - - list_for_each_entry(turbp, &urbp->queue_list, queue_list) { - if (!turbp->queued) - break; - toggle = uhci_fixup_toggle(turbp->urb, toggle); - } + WARN_ON(qh->state == QH_STATE_ACTIVE); - usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), - usb_pipeout(urb->pipe), toggle); - } - - if (urbp->queued) { - /* We're somewhere in the middle (or end). The case where - * we're at the head is handled in uhci_remove_qh(). 
*/ - purbp = list_entry(urbp->queue_list.prev, struct urb_priv, - queue_list); - - pltd = list_entry(purbp->td_list.prev, struct uhci_td, list); - if (nurbp->queued) - pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH; - else - /* The next URB happens to be the beginning, so */ - /* we're the last, end the chain */ - pltd->link = UHCI_PTR_TERM; - } + list_move(&qh->node, &uhci->idle_qh_list); + qh->state = QH_STATE_IDLE; - /* urbp->queue_list is handled in uhci_remove_qh() */ + /* If anyone is waiting for a QH to become idle, wake them up */ + if (uhci->num_waiting) + wake_up_all(&uhci->waitqh); } -static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb) +static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, + struct urb *urb) { struct urb_priv *urbp; @@ -453,17 +340,14 @@ static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *u memset((void *)urbp, 0, sizeof(*urbp)); - urbp->fsbrtime = jiffies; urbp->urb = urb; + urb->hcpriv = urbp; + urbp->fsbrtime = jiffies; + INIT_LIST_HEAD(&urbp->node); INIT_LIST_HEAD(&urbp->td_list); - INIT_LIST_HEAD(&urbp->queue_list); INIT_LIST_HEAD(&urbp->urb_list); - list_add_tail(&urbp->urb_list, &uhci->urb_list); - - urb->hcpriv = urbp; - return urbp; } @@ -482,18 +366,17 @@ static void uhci_remove_td_from_urb(struct uhci_td *td) list_del_init(&td->list); } -static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb) +static void uhci_free_urb_priv(struct uhci_hcd *uhci, + struct urb_priv *urbp) { struct uhci_td *td, *tmp; - struct urb_priv *urbp; - - urbp = (struct urb_priv *)urb->hcpriv; - if (!urbp) - return; if (!list_empty(&urbp->urb_list)) - dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list " - "or uhci->remove_list!\n", urb); + dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list!\n", + urbp->urb); + if (!list_empty(&urbp->node)) + dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n", + urbp->urb); uhci_get_current_frame_number(uhci); if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) { @@ -502,7 +385,7 @@ static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb) } /* Check to see if the remove list is empty. Set the IOC bit */ - /* to force an interrupt so we can remove the TDs*/ + /* to force an interrupt so we can remove the TDs. */ if (list_empty(&uhci->td_remove_list)) uhci_set_next_interrupt(uhci); @@ -511,7 +394,7 @@ static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb) list_add(&td->remove_list, &uhci->td_remove_list); } - urb->hcpriv = NULL; + urbp->urb->hcpriv = NULL; kmem_cache_free(uhci_up_cachep, urbp); } @@ -568,17 +451,82 @@ static int uhci_map_status(int status, int dir_out) } /* + * Fix up the data toggles for URBs in a queue, when one of them + * terminates early (short transfer, error, or dequeued). + */ +static void uhci_fixup_toggles(struct urb *urb) +{ + struct list_head *head; + struct uhci_td *td; + struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; + int prevactive = 0; + unsigned int toggle = 0; + struct urb_priv *turbp, *list_end; + + /* + * We need to find out what the last successful toggle was so + * we can update the data toggles for the following transfers. 
+ * + * There are 2 ways the last successful completed TD is found: + * + * 1) The TD is NOT active and the actual length < expected length + * 2) The TD is NOT active and it's the last TD in the chain + * + * and a third way the first uncompleted TD is found: + * + * 3) The TD is active and the previous TD is NOT active + */ + head = &urbp->td_list; + list_for_each_entry(td, head, list) { + unsigned int ctrlstat = td_status(td); + + if (!(ctrlstat & TD_CTRL_ACTIVE) && + (uhci_actual_length(ctrlstat) < + uhci_expected_length(td_token(td)) || + td->list.next == head)) + toggle = uhci_toggle(td_token(td)) ^ 1; + else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive) + toggle = uhci_toggle(td_token(td)); + + prevactive = ctrlstat & TD_CTRL_ACTIVE; + } + + /* + * Fix up the toggle for the following URBs in the queue. + * + * We can stop as soon as we find an URB with toggles set correctly, + * because then all the following URBs will be correct also. + */ + list_end = list_entry(&urbp->qh->queue, struct urb_priv, node); + turbp = urbp; + while ((turbp = list_entry(turbp->node.next, struct urb_priv, node)) + != list_end) { + td = list_entry(turbp->td_list.next, struct uhci_td, list); + if (uhci_toggle(td_token(td)) == toggle) + return; + + list_for_each_entry(td, &turbp->td_list, list) { + td->token ^= __constant_cpu_to_le32(TD_TOKEN_TOGGLE); + toggle ^= 1; + } + } + + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe), toggle); +} + +/* * Control transfers */ -static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb) +static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, + struct uhci_qh *qh) { - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; struct uhci_td *td; - struct uhci_qh *qh, *skelqh; unsigned long destination, status; - int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); + int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); int len = urb->transfer_buffer_length; dma_addr_t data = urb->transfer_dma; + __le32 *plink; /* The "pipe" thing contains the destination in bits 8--18 */ destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP; @@ -597,7 +545,8 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct ur uhci_add_td_to_urb(urb, td); uhci_fill_td(td, status, destination | uhci_explen(8), - urb->setup_dma); + urb->setup_dma); + plink = &td->link; /* * If direction is "send", change the packet ID from SETUP (0x2D) @@ -615,21 +564,20 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct ur * Build the DATA TDs */ while (len > 0) { - int pktsze = len; - - if (pktsze > maxsze) - pktsze = maxsze; + int pktsze = min(len, maxsze); td = uhci_alloc_td(uhci); if (!td) return -ENOMEM; + *plink = cpu_to_le32(td->dma_handle); /* Alternate Data0/1 (start with Data1) */ destination ^= TD_TOKEN_TOGGLE; uhci_add_td_to_urb(urb, td); uhci_fill_td(td, status, destination | uhci_explen(pktsze), - data); + data); + plink = &td->link; data += pktsze; len -= pktsze; @@ -641,6 +589,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct ur td = uhci_alloc_td(uhci); if (!td) return -ENOMEM; + *plink = cpu_to_le32(td->dma_handle); /* * It's IN if the pipe is an output pipe or we're not expecting @@ -658,16 +607,7 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct ur uhci_add_td_to_urb(urb, td); uhci_fill_td(td, status | TD_CTRL_IOC, - destination | uhci_explen(0), 0); - - qh = 
uhci_alloc_qh(uhci); - if (!qh) - return -ENOMEM; - - urbp->qh = qh; - qh->urbp = urbp; - - uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH); + destination | uhci_explen(0), 0); /* Low-speed transfers get a different queue, and won't hog the bus. * Also, some devices enumerate better without FSBR; the easiest way @@ -675,18 +615,13 @@ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct ur * isn't in the CONFIGURED state. */ if (urb->dev->speed == USB_SPEED_LOW || urb->dev->state != USB_STATE_CONFIGURED) - skelqh = uhci->skel_ls_control_qh; + qh->skel = uhci->skel_ls_control_qh; else { - skelqh = uhci->skel_fs_control_qh; + qh->skel = uhci->skel_fs_control_qh; uhci_inc_fsbr(uhci, urb); } - if (eurb) - uhci_append_queued_urb(uhci, eurb, urb); - else - uhci_insert_qh(uhci, skelqh, urb); - - return -EINPROGRESS; + return 0; } /* @@ -703,7 +638,7 @@ static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb) struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; struct uhci_td *td; - urbp->short_control_packet = 1; + urbp->short_transfer = 1; td = list_entry(urbp->td_list.prev, struct uhci_td, list); urbp->qh->element = cpu_to_le32(td->dma_handle); @@ -720,16 +655,14 @@ static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb) unsigned int status; int ret = 0; - if (list_empty(&urbp->td_list)) - return -EINVAL; - head = &urbp->td_list; - - if (urbp->short_control_packet) { + if (urbp->short_transfer) { tmp = head->prev; goto status_stage; } + urb->actual_length = 0; + tmp = head->next; td = list_entry(tmp, struct uhci_td, list); @@ -742,8 +675,6 @@ static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb) if (status) goto td_error; - urb->actual_length = 0; - /* The rest of the TDs (but the last) are data */ tmp = tmp->next; while (tmp != head && tmp->next != head) { @@ -770,10 +701,7 @@ static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb) goto err; } - if (uhci_packetid(td_token(td)) == USB_PID_IN) - return usb_control_retrigger_status(uhci, urb); - else - return 0; + return usb_control_retrigger_status(uhci, urb); } } @@ -825,15 +753,15 @@ err: /* * Common submit for bulk and interrupt */ -static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh) +static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, + struct uhci_qh *qh) { struct uhci_td *td; - struct uhci_qh *qh; unsigned long destination, status; - int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); + int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); int len = urb->transfer_buffer_length; - struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv; dma_addr_t data = urb->transfer_dma; + __le32 *plink, fake_link; if (len < 0) return -EINVAL; @@ -841,7 +769,8 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb /* The "pipe" thing contains the destination in bits 8--18 */ destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); - status = uhci_maxerr(3) | TD_CTRL_ACTIVE; + /* 3 errors */ + status = TD_CTRL_ACTIVE | uhci_maxerr(3); if (urb->dev->speed == USB_SPEED_LOW) status |= TD_CTRL_LS; if (usb_pipein(urb->pipe)) @@ -850,10 +779,11 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb /* * Build the DATA TDs */ + plink = &fake_link; do { /* Allow zero length packets */ int pktsze = maxsze; - if (pktsze >= len) { + if (len <= pktsze) { /* The last packet */ pktsze = len; if 
(!(urb->transfer_flags & URB_SHORT_NOT_OK)) status &= ~TD_CTRL_SPD; @@ -862,12 +792,15 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb td = uhci_alloc_td(uhci); if (!td) return -ENOMEM; + *plink = cpu_to_le32(td->dma_handle); uhci_add_td_to_urb(urb, td); - uhci_fill_td(td, status, destination | uhci_explen(pktsze) | + uhci_fill_td(td, status, + destination | uhci_explen(pktsze) | (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT), data); + plink = &td->link; data += pktsze; len -= maxsze; @@ -883,11 +816,13 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb * however, if transfer_length == 0, the zero packet was already * prepared above. */ - if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) && - !len && urb->transfer_buffer_length) { + if ((urb->transfer_flags & URB_ZERO_PACKET) && + usb_pipeout(urb->pipe) && len == 0 && + urb->transfer_buffer_length > 0) { td = uhci_alloc_td(uhci); if (!td) return -ENOMEM; + *plink = cpu_to_le32(td->dma_handle); uhci_add_td_to_urb(urb, td); uhci_fill_td(td, status, destination | uhci_explen(0) | @@ -905,24 +840,9 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb * fast side but not enough to justify delaying an interrupt * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT * flag setting. */ - td->status |= cpu_to_le32(TD_CTRL_IOC); - - qh = uhci_alloc_qh(uhci); - if (!qh) - return -ENOMEM; - - urbp->qh = qh; - qh->urbp = urbp; + td->status |= __constant_cpu_to_le32(TD_CTRL_IOC); - /* Always breadth first */ - uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH); - - if (eurb) - uhci_append_queued_urb(uhci, eurb, urb); - else - uhci_insert_qh(uhci, skelqh, urb); - - return -EINPROGRESS; + return 0; } /* @@ -954,8 +874,24 @@ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) if (urb->transfer_flags & URB_SHORT_NOT_OK) { ret = -EREMOTEIO; goto err; - } else - return 0; + } + + /* + * This URB stopped short of its end. We have to + * fix up the toggles of the following URBs on the + * queue and restart the queue. + * + * Do this only the first time we encounter the + * short URB. + */ + if (!urbp->short_transfer) { + urbp->short_transfer = 1; + uhci_fixup_toggles(urb); + td = list_entry(urbp->td_list.prev, + struct uhci_td, list); + urbp->qh->element = td->link; + } + break; } } @@ -988,7 +924,8 @@ err: return ret; } -static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb) +static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, + struct uhci_qh *qh) { int ret; @@ -996,21 +933,22 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struc if (urb->dev->speed == USB_SPEED_LOW) return -EINVAL; - ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh); - if (ret == -EINPROGRESS) + qh->skel = uhci->skel_bulk_qh; + ret = uhci_submit_common(uhci, urb, qh); + if (ret == 0) uhci_inc_fsbr(uhci, urb); - return ret; } -static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb) +static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, + struct uhci_qh *qh) { - /* USB 1.1 interrupt transfers only involve one packet per interval; - * that's the uhci_submit_common() "breadth first" policy. Drivers - * can submit urbs of any length, but longer ones might need many - * intervals to complete. 
@@ -988,7 +924,8 @@ err:
 
 	return ret;
 }
 
-static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
+static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
 {
 	int ret;
@@ -996,21 +933,22 @@ static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struc
 	if (urb->dev->speed == USB_SPEED_LOW)
 		return -EINVAL;
 
-	ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
-	if (ret == -EINPROGRESS)
+	qh->skel = uhci->skel_bulk_qh;
+	ret = uhci_submit_common(uhci, urb, qh);
+	if (ret == 0)
 		uhci_inc_fsbr(uhci, urb);
-
 	return ret;
 }
 
-static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
+static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
 {
-	/* USB 1.1 interrupt transfers only involve one packet per interval;
-	 * that's the uhci_submit_common() "breadth first" policy.  Drivers
-	 * can submit urbs of any length, but longer ones might need many
-	 * intervals to complete.
+	/* USB 1.1 interrupt transfers only involve one packet per interval.
+	 * Drivers can submit URBs of any length, but longer ones will need
+	 * multiple intervals to complete.
 	 */
-	return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
+	qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)];
+	return uhci_submit_common(uhci, urb, qh);
 }
 
 /*
@@ -1072,11 +1010,12 @@ static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
 /*
  * Isochronous transfers
  */
-static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
+static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
+		struct uhci_qh *qh)
 {
-	struct uhci_td *td;
+	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
 	int i, ret, frame;
-	int status, destination;
+	unsigned long destination, status;
 	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
 
 	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
@@ -1092,20 +1031,25 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
 			return -ENOMEM;
 
 		uhci_add_td_to_urb(urb, td);
-		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length),
-			urb->transfer_dma + urb->iso_frame_desc[i].offset);
-
-		if (i + 1 >= urb->number_of_packets)
-			td->status |= cpu_to_le32(TD_CTRL_IOC);
+		uhci_fill_td(td, status, destination |
+				uhci_explen(urb->iso_frame_desc[i].length),
+				urb->transfer_dma +
+					urb->iso_frame_desc[i].offset);
 	}
 
+	/* Set the interrupt-on-completion flag on the last packet. */
+	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
+
+	qh->skel = uhci->skel_iso_qh;
+
+	/* Add the TDs to the frame list */
 	frame = urb->start_frame;
 	list_for_each_entry(td, &urbp->td_list, list) {
-		uhci_insert_td_frame_list(uhci, td, frame);
+		uhci_insert_td_in_frame_list(uhci, td, frame);
 		frame += urb->interval;
 	}
 
-	return -EINPROGRESS;
+	return 0;
 }
 
 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
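Editor's illustration (not part of the patch): isochronous URBs bypass the QH queueing machinery; their TDs are linked directly into the controller's frame list, spaced urb->interval frames apart, as in the hunk above. A minimal sketch restating that loop (hypothetical function name, assumes uhci-hcd.h):

/* Sketch: place one iso TD per frame, stepping by the URB interval. */
static void example_schedule_iso(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int frame = urb->start_frame;

	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += urb->interval;
	}
}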
@@ -1139,80 +1083,67 @@ static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
 		i++;
 	}
 
-	unlink_isochronous_tds(uhci, urb);
 	return ret;
 }
 
-static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
-{
-	struct urb_priv *up;
-
-	/* We don't match Isoc transfers since they are special */
-	if (usb_pipeisoc(urb->pipe))
-		return NULL;
-
-	list_for_each_entry(up, &uhci->urb_list, urb_list) {
-		struct urb *u = up->urb;
-
-		if (u->dev == urb->dev && u->status == -EINPROGRESS) {
-			/* For control, ignore the direction */
-			if (usb_pipecontrol(urb->pipe) &&
-			    (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
-				return u;
-			else if (u->pipe == urb->pipe)
-				return u;
-		}
-	}
-
-	return NULL;
-}
-
 static int uhci_urb_enqueue(struct usb_hcd *hcd,
-		struct usb_host_endpoint *ep,
+		struct usb_host_endpoint *hep,
 		struct urb *urb, gfp_t mem_flags)
 {
 	int ret;
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 	unsigned long flags;
-	struct urb *eurb;
+	struct urb_priv *urbp;
+	struct uhci_qh *qh;
 	int bustime;
 
 	spin_lock_irqsave(&uhci->lock, flags);
 
 	ret = urb->status;
 	if (ret != -EINPROGRESS)		/* URB already unlinked! */
-		goto out;
+		goto done;
 
-	eurb = uhci_find_urb_ep(uhci, urb);
+	ret = -ENOMEM;
+	urbp = uhci_alloc_urb_priv(uhci, urb);
+	if (!urbp)
+		goto done;
 
-	if (!uhci_alloc_urb_priv(uhci, urb)) {
-		ret = -ENOMEM;
-		goto out;
+	if (hep->hcpriv)
+		qh = (struct uhci_qh *) hep->hcpriv;
+	else {
+		qh = uhci_alloc_qh(uhci, urb->dev, hep);
+		if (!qh)
+			goto err_no_qh;
 	}
+	urbp->qh = qh;
 
 	switch (usb_pipetype(urb->pipe)) {
 	case PIPE_CONTROL:
-		ret = uhci_submit_control(uhci, urb, eurb);
+		ret = uhci_submit_control(uhci, urb, qh);
+		break;
+	case PIPE_BULK:
+		ret = uhci_submit_bulk(uhci, urb, qh);
 		break;
 	case PIPE_INTERRUPT:
-		if (!eurb) {
+		if (list_empty(&qh->queue)) {
 			bustime = usb_check_bandwidth(urb->dev, urb);
 			if (bustime < 0)
 				ret = bustime;
 			else {
-				ret = uhci_submit_interrupt(uhci, urb, eurb);
-				if (ret == -EINPROGRESS)
+				ret = uhci_submit_interrupt(uhci, urb, qh);
+				if (ret == 0)
 					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
 			}
 		} else {	/* inherit from parent */
-			urb->bandwidth = eurb->bandwidth;
-			ret = uhci_submit_interrupt(uhci, urb, eurb);
+			struct urb_priv *eurbp;
+
+			eurbp = list_entry(qh->queue.prev, struct urb_priv,
+					node);
+			urb->bandwidth = eurbp->urb->bandwidth;
+			ret = uhci_submit_interrupt(uhci, urb, qh);
 		}
 		break;
-	case PIPE_BULK:
-		ret = uhci_submit_bulk(uhci, urb, eurb);
-		break;
 	case PIPE_ISOCHRONOUS:
 		bustime = usb_check_bandwidth(urb->dev, urb);
 		if (bustime < 0) {
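Editor's illustration (not part of the patch): with one QH per endpoint, enqueue reuses the QH cached in usb_host_endpoint->hcpriv and allocates a new one only for the endpoint's first URB, as the hunk above shows. A minimal sketch of that lookup (hypothetical helper name):

/* Sketch: return the endpoint's QH, allocating it on first use. */
static struct uhci_qh *example_get_qh(struct uhci_hcd *uhci,
		struct usb_host_endpoint *hep, struct urb *urb)
{
	struct uhci_qh *qh = hep->hcpriv;

	if (!qh)		/* first URB for this endpoint */
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
	return qh;		/* NULL on allocation failure */
}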
@@ -1220,22 +1151,59 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
 			break;
 		}
 
-		ret = uhci_submit_isochronous(uhci, urb);
-		if (ret == -EINPROGRESS)
+		ret = uhci_submit_isochronous(uhci, urb, qh);
+		if (ret == 0)
 			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
 		break;
 	}
+	if (ret != 0)
+		goto err_submit_failed;
 
-	if (ret != -EINPROGRESS) {
-		/* Submit failed, so delete it from the urb_list */
-		struct urb_priv *urbp = urb->hcpriv;
+	/* Add this URB to the QH */
+	urbp->qh = qh;
+	list_add_tail(&urbp->node, &qh->queue);
+	list_add_tail(&urbp->urb_list, &uhci->urb_list);
 
-		list_del_init(&urbp->urb_list);
-		uhci_destroy_urb_priv(uhci, urb);
-	} else
-		ret = 0;
+	/* If the new URB is the first and only one on this QH then either
+	 * the QH is new and idle or else it's unlinked and waiting to
+	 * become idle, so we can activate it right away. */
+	if (qh->queue.next == &urbp->node)
+		uhci_activate_qh(uhci, qh);
+
+	/* If the QH is already active, we have a race with the hardware.
+	 * This won't get fixed until dummy TDs are added. */
+	else if (qh->state == QH_STATE_ACTIVE) {
+
+		/* If the URB isn't first on its queue, adjust the link pointer
+		 * of the last TD in the previous URB. */
+		if (urbp->node.prev != &urbp->qh->queue) {
+			struct urb_priv *purbp = list_entry(urbp->node.prev,
+					struct urb_priv, node);
+			struct uhci_td *ptd = list_entry(purbp->td_list.prev,
+					struct uhci_td, list);
+			struct uhci_td *td = list_entry(urbp->td_list.next,
+					struct uhci_td, list);
+
+			ptd->link = cpu_to_le32(td->dma_handle);
+
+		}
+		if (qh_element(qh) == UHCI_PTR_TERM) {
+			struct uhci_td *td = list_entry(urbp->td_list.next,
+					struct uhci_td, list);
+
+			qh->element = cpu_to_le32(td->dma_handle);
+		}
+	}
+	goto done;
+
+err_submit_failed:
+	if (qh->state == QH_STATE_IDLE)
+		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
 
-out:
+err_no_qh:
+	uhci_free_urb_priv(uhci, urbp);
+
+done:
 	spin_unlock_irqrestore(&uhci->lock, flags);
 	return ret;
 }
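Editor's illustration (not part of the patch): the hunk above appends an URB to an already-active QH by patching the previous URB's last TD link, and re-points a terminated QH element at the new first TD; the patch itself notes the remaining race until dummy TDs are introduced. A minimal sketch of the linking step (hypothetical helper, assumes uhci-hcd.h):

/* Sketch: hook a newly queued URB's first TD into an active QH. */
static void example_append_to_active_qh(struct uhci_qh *qh,
		struct uhci_td *prev_last_td, struct uhci_td *new_first_td)
{
	if (prev_last_td)	/* not the first URB on the queue */
		prev_last_td->link = cpu_to_le32(new_first_td->dma_handle);
	if (qh_element(qh) == UHCI_PTR_TERM)	/* QH had run dry */
		qh->element = cpu_to_le32(new_first_td->dma_handle);
}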
@@ -1245,119 +1213,115 @@ out:
  */
 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
 {
-	int ret = -EINPROGRESS;
-	struct urb_priv *urbp;
-
-	spin_lock(&urb->lock);
-
-	urbp = (struct urb_priv *)urb->hcpriv;
-
-	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
-		goto out;
+	int status;
+	int okay_to_giveback = 0;
+	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
 
 	switch (usb_pipetype(urb->pipe)) {
 	case PIPE_CONTROL:
-		ret = uhci_result_control(uhci, urb);
-		break;
-	case PIPE_BULK:
-	case PIPE_INTERRUPT:
-		ret = uhci_result_common(uhci, urb);
+		status = uhci_result_control(uhci, urb);
 		break;
 	case PIPE_ISOCHRONOUS:
-		ret = uhci_result_isochronous(uhci, urb);
+		status = uhci_result_isochronous(uhci, urb);
+		break;
+	default:	/* PIPE_BULK or PIPE_INTERRUPT */
+		status = uhci_result_common(uhci, urb);
 		break;
 	}
 
-	if (ret == -EINPROGRESS)
-		goto out;
-	urb->status = ret;
+	spin_lock(&urb->lock);
+	if (urb->status == -EINPROGRESS) {	/* Not yet dequeued */
+		if (status != -EINPROGRESS) {	/* URB has completed */
+			urb->status = status;
+
+			/* If the URB got a real error (as opposed to
+			 * simply being dequeued), we don't have to
+			 * unlink the QH.  Fix this later... */
+			if (status < 0)
+				uhci_unlink_qh(uhci, urbp->qh);
+			else
+				okay_to_giveback = 1;
+		}
+	} else {			/* Already dequeued */
+		if (urbp->qh->state == QH_STATE_UNLINKING &&
+				uhci->frame_number + uhci->is_stopped !=
+				urbp->qh->unlink_frame)
+			okay_to_giveback = 1;
+	}
+	spin_unlock(&urb->lock);
+	if (!okay_to_giveback)
+		return;
 
 	switch (usb_pipetype(urb->pipe)) {
-	case PIPE_CONTROL:
-	case PIPE_BULK:
 	case PIPE_ISOCHRONOUS:
 		/* Release bandwidth for Interrupt or Isoc. transfers */
 		if (urb->bandwidth)
 			usb_release_bandwidth(urb->dev, urb, 1);
-		uhci_unlink_generic(uhci, urb);
 		break;
 	case PIPE_INTERRUPT:
 		/* Release bandwidth for Interrupt or Isoc. transfers */
 		/* Make sure we don't release if we have a queued URB */
-		if (list_empty(&urbp->queue_list) && urb->bandwidth)
+		if (list_empty(&urbp->qh->queue) && urb->bandwidth)
 			usb_release_bandwidth(urb->dev, urb, 0);
 		else
 			/* bandwidth was passed on to queued URB, */
 			/* so don't let usb_unlink_urb() release it */
 			urb->bandwidth = 0;
-		uhci_unlink_generic(uhci, urb);
+		/* Falls through */
+	case PIPE_BULK:
+		if (status < 0)
+			uhci_fixup_toggles(urb);
+		break;
+	default:	/* PIPE_CONTROL */
 		break;
-	default:
-		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
-				"for urb %p\n",
-				__FUNCTION__, usb_pipetype(urb->pipe), urb);
 	}
 
-	/* Move it from uhci->urb_list to uhci->complete_list */
-	uhci_moveto_complete(uhci, urbp);
+	/* Take the URB's TDs off the hardware schedule */
+	uhci_remove_tds_from_schedule(uhci, urb, status);
 
-out:
-	spin_unlock(&urb->lock);
-}
-
-static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
-{
-	struct list_head *head;
-	struct uhci_td *td;
-	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
-	int prevactive = 0;
+	/* Take the URB off the QH's queue and see if the QH is now unused */
+	list_del_init(&urbp->node);
+	if (list_empty(&urbp->qh->queue))
+		uhci_unlink_qh(uhci, urbp->qh);
 
 	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */
 
-	/*
-	 * Now we need to find out what the last successful toggle was
-	 * so we can update the local data toggle for the next transfer
-	 *
-	 * There are 2 ways the last successful completed TD is found:
-	 *
-	 * 1) The TD is NOT active and the actual length < expected length
-	 * 2) The TD is NOT active and it's the last TD in the chain
-	 *
-	 * and a third way the first uncompleted TD is found:
-	 *
-	 * 3) The TD is active and the previous TD is NOT active
-	 *
-	 * Control and Isochronous ignore the toggle, so this is safe
-	 * for all types
-	 *
-	 * FIXME: The toggle fixups won't be 100% reliable until we
-	 * change over to using a single queue for each endpoint and
-	 * stop the queue before unlinking.
-	 */
-	head = &urbp->td_list;
-	list_for_each_entry(td, head, list) {
-		unsigned int ctrlstat = td_status(td);
+	/* Queue it for giving back */
+	list_move_tail(&urbp->urb_list, &uhci->complete_list);
+}
 
-		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
-				(uhci_actual_length(ctrlstat) <
-				 uhci_expected_length(td_token(td)) ||
-				td->list.next == head))
-			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
-				uhci_packetout(td_token(td)),
-				uhci_toggle(td_token(td)) ^ 1);
-		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
-			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
-				uhci_packetout(td_token(td)),
-				uhci_toggle(td_token(td)));
+/*
+ * Check out the QHs waiting to be fully unlinked
+ */
+static void uhci_scan_unlinking_qhs(struct uhci_hcd *uhci)
+{
+	struct uhci_qh *qh, *tmp;
 
-		prevactive = ctrlstat & TD_CTRL_ACTIVE;
-	}
+	list_for_each_entry_safe(qh, tmp, &uhci->skel_unlink_qh->node, node) {
 
-	uhci_delete_queued_urb(uhci, urb);
+		/* If the queue is empty and the QH is fully unlinked then
+		 * it can become IDLE. */
+		if (list_empty(&qh->queue)) {
+			if (uhci->frame_number + uhci->is_stopped !=
+					qh->unlink_frame)
+				uhci_make_qh_idle(uhci, qh);
 
-	/* The interrupt loop will reclaim the QHs */
-	uhci_remove_qh(uhci, urbp->qh);
-	urbp->qh = NULL;
+		/* If none of the QH's URBs have been dequeued then the QH
+		 * should be re-activated. */
+		} else {
+			struct urb_priv *urbp;
+			int any_dequeued = 0;
+
+			list_for_each_entry(urbp, &qh->queue, node) {
+				if (urbp->urb->status != -EINPROGRESS) {
+					any_dequeued = 1;
+					break;
+				}
+			}
+			if (!any_dequeued)
+				uhci_activate_qh(uhci, qh);
+		}
+	}
 }
 
 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
@@ -1370,22 +1334,11 @@ static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
 	urbp = urb->hcpriv;
 	if (!urbp)			/* URB was never linked! */
 		goto done;
-	list_del_init(&urbp->urb_list);
 
+	/* Remove Isochronous TDs from the frame list ASAP */
 	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
-		unlink_isochronous_tds(uhci, urb);
-	uhci_unlink_generic(uhci, urb);
-
-	uhci_get_current_frame_number(uhci);
-	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age) {
-		uhci_remove_pending_urbps(uhci);
-		uhci->urb_remove_age = uhci->frame_number;
-	}
-
-	/* If we're the first, set the next interrupt bit */
-	if (list_empty(&uhci->urb_remove_list))
-		uhci_set_next_interrupt(uhci);
-	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);
+		uhci_unlink_isochronous_tds(uhci, urb);
+	uhci_unlink_qh(uhci, urbp->qh);
 
 done:
 	spin_unlock_irqrestore(&uhci->lock, flags);
@@ -1426,17 +1379,6 @@ static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
 	return 0;
 }
 
-static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
-{
-	struct uhci_qh *qh, *tmp;
-
-	list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
-		list_del_init(&qh->remove_list);
-
-		uhci_free_qh(uhci, qh);
-	}
-}
-
 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
 {
 	struct uhci_td *td, *tmp;
@@ -1455,7 +1397,7 @@ __acquires(uhci->lock)
 {
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
 
-	uhci_destroy_urb_priv(uhci, urb);
+	uhci_free_urb_priv(uhci, (struct urb_priv *) (urb->hcpriv));
 
 	spin_unlock(&uhci->lock);
 	usb_hcd_giveback_urb(hcd, urb, regs);
@@ -1474,13 +1416,6 @@ static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
 	}
 }
 
-static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
-{
-
-	/* Splice the urb_remove_list onto the end of the complete_list */
-	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
-}
-
 /* Process events in the schedule, but only in one thread at a time */
 static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 {
@@ -1498,12 +1433,8 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 	uhci_clear_next_interrupt(uhci);
 
 	uhci_get_current_frame_number(uhci);
-	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age)
-		uhci_free_pending_qhs(uhci);
 	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
 		uhci_free_pending_tds(uhci);
-	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age)
-		uhci_remove_pending_urbps(uhci);
 
 	/* Walk the list of pending URBs to see which ones completed
 	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
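Editor's illustration (not part of the patch): several paths above defer reclamation until the controller can no longer be executing the unlinked entries; the recurring test is the frame-age comparison used for TDs and unlinked QHs. A minimal sketch of that rule (hypothetical helper; the assumption is simply that uhci->is_stopped folds "controller halted" into the comparison):

/* Sketch: an entry unlinked during unlink_frame may only be reclaimed
 * once the frame counter has moved past it (or the HC is stopped). */
static int example_safe_to_reclaim(struct uhci_hcd *uhci,
		unsigned int unlink_frame)
{
	return uhci->frame_number + uhci->is_stopped != unlink_frame;
}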
@@ -1516,25 +1447,21 @@ static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
 	uhci_finish_completion(uhci, regs);
 
 	/* If the controller is stopped, we can finish these off right now */
-	if (uhci->is_stopped) {
-		uhci_free_pending_qhs(uhci);
+	if (uhci->is_stopped)
 		uhci_free_pending_tds(uhci);
-		uhci_remove_pending_urbps(uhci);
-	}
 
 	if (uhci->need_rescan)
 		goto rescan;
 	uhci->scan_in_progress = 0;
 
-	if (list_empty(&uhci->urb_remove_list) &&
-	    list_empty(&uhci->td_remove_list) &&
-	    list_empty(&uhci->qh_remove_list))
+	/* Check out the QHs waiting for unlinking */
+	uhci_scan_unlinking_qhs(uhci);
+
+	if (list_empty(&uhci->td_remove_list) &&
+			list_empty(&uhci->skel_unlink_qh->node))
 		uhci_clear_next_interrupt(uhci);
 	else
 		uhci_set_next_interrupt(uhci);
-
-	/* Wake up anyone waiting for an URB to complete */
-	wake_up_all(&uhci->waitqh);
 }
 
 static void check_fsbr(struct uhci_hcd *uhci)