Line data Source code
1 : /*
2 : * xHCI host controller driver
3 : *
4 : * Copyright (C) 2008 Intel Corp.
5 : *
6 : * Author: Sarah Sharp
7 : * Some code borrowed from the Linux EHCI driver.
8 : *
9 : * This program is free software; you can redistribute it and/or modify
10 : * it under the terms of the GNU General Public License version 2 as
11 : * published by the Free Software Foundation.
12 : *
13 : * This program is distributed in the hope that it will be useful, but
14 : * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15 : * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 : * for more details.
17 : *
18 : * You should have received a copy of the GNU General Public License
19 : * along with this program; if not, write to the Free Software Foundation,
20 : * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 : */
22 :
23 : #include <linux/irq.h>
24 : #include <linux/module.h>
25 : #include <linux/moduleparam.h>
26 :
27 : #include "xhci.h"
28 :
29 : #define DRIVER_AUTHOR "Sarah Sharp"
30 : #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
31 :
32 : /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
33 1 : static int link_quirk;
34 : module_param(link_quirk, int, S_IRUGO | S_IWUSR);
35 : MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
36 :
37 : /* TODO: copied from ehci-hcd.c - can this be refactored? */
38 : /*
39 : * handshake - spin reading hc until handshake completes or fails
40 : * @ptr: address of hc register to be read
41 : * @mask: bits to look at in result of read
42 : * @done: value of those bits when handshake succeeds
43 : * @usec: timeout in microseconds
44 : *
45 : * Returns negative errno, or zero on success
46 : *
47 : * Success happens when the "mask" bits have the specified value (hardware
48 : * handshake done). There are two failure modes: "usec" microseconds have passed (major
49 : * hardware flakeout), or the register reads as all-ones (hardware removed).
50 : */
51 : static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
52 : u32 mask, u32 done, int usec)
53 10 : {
54 : u32 result;
55 :
56 10 : do {
57 20 : result = xhci_readl(xhci, ptr);
58 20 : if (result == ~(u32)0) /* card removed */
59 10 : return -ENODEV;
60 10 : result &= mask;
61 20 : if (result == done)
62 10 : return 0;
63 10 : udelay(1);
64 10 : usec--;
65 20 : } while (usec > 0);
66 10 : return -ETIMEDOUT;
67 10 : }
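
/*
 * A minimal user-space sketch of the three-outcome polling pattern that
 * handshake() implements: success when the masked bits match, -ENODEV when
 * the register reads all-ones, -ETIMEDOUT when the budget runs out. The
 * "register" here is just a variable, and the FAKE_* error values are
 * stand-ins for the kernel's errno constants; only the control flow is
 * taken from the function above.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_ENODEV	19
#define FAKE_ETIMEDOUT	110

static int fake_handshake(volatile uint32_t *reg, uint32_t mask,
			  uint32_t done, int usec)
{
	uint32_t result;

	do {
		result = *reg;
		if (result == ~(uint32_t)0)	/* all-ones: hardware gone */
			return -FAKE_ENODEV;
		if ((result & mask) == done)	/* bits match: success */
			return 0;
		/* the real driver udelay(1)s here */
		usec--;
	} while (usec > 0);
	return -FAKE_ETIMEDOUT;			/* hardware flakeout */
}

int main(void)
{
	volatile uint32_t status = 0x1;	/* pretend STS_HALT is already set */

	printf("%d\n", fake_handshake(&status, 0x1, 0x1, 100)); /* prints 0 */
	return 0;
}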
68 :
69 : /*
70 : * Disable interrupts and begin the xHCI halting process.
71 : */
72 : void xhci_quiesce(struct xhci_hcd *xhci)
73 : {
74 5 : u32 halted;
75 5 : u32 cmd;
76 5 : u32 mask;
77 5 :
78 5 : mask = ~(XHCI_IRQS);
79 15 : halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
80 10 : if (!halted)
81 5 : mask &= ~CMD_RUN;
82 :
83 10 : cmd = xhci_readl(xhci, &xhci->op_regs->command);
84 5 : cmd &= mask;
85 10 : xhci_writel(xhci, cmd, &xhci->op_regs->command);
86 5 : }
87 :
88 : /*
89 : * Force HC into halt state.
90 : *
91 : * Disable any IRQs and clear the run/stop bit.
92 : * HC will complete any current and actively pipelined transactions, and
93 : * should halt within 16 microframes of the run/stop bit being cleared.
94 : * Read HC Halted bit in the status register to see when the HC is finished.
95 : * XXX: shouldn't we set HC_STATE_HALT here somewhere?
96 : */
97 : int xhci_halt(struct xhci_hcd *xhci)
98 : {
99 5 : xhci_dbg(xhci, "// Halt the HC\n");
100 10 : xhci_quiesce(xhci);
101 :
102 15 : return handshake(xhci, &xhci->op_regs->status,
103 : STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
104 : }
105 :
106 : /*
107 : * Set the run bit and wait for the host to be running.
108 : */
109 : int xhci_start(struct xhci_hcd *xhci)
110 : {
111 1 : u32 temp;
112 1 : int ret;
113 1 :
114 3 : temp = xhci_readl(xhci, &xhci->op_regs->command);
115 2 : temp |= (CMD_RUN);
116 1 : xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
117 : temp);
118 2 : xhci_writel(xhci, temp, &xhci->op_regs->command);
119 :
120 : /*
121 : * Wait for the HCHalted Status bit to be 0 to indicate the host is
122 : * running.
123 : */
124 2 : ret = handshake(xhci, &xhci->op_regs->status,
125 : STS_HALT, 0, XHCI_MAX_HALT_USEC);
126 2 : if (ret == -ETIMEDOUT)
127 8 : xhci_err(xhci, "Host took too long to start, "
128 : "waited %u microseconds.\n",
129 : XHCI_MAX_HALT_USEC);
130 2 : return ret;
131 : }
132 :
133 : /*
134 : * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
135 : *
136 : * This resets pipelines, timers, counters, state machines, etc.
137 : * Transactions will be terminated immediately, and operational registers
138 : * will be set to their defaults.
139 : */
140 : int xhci_reset(struct xhci_hcd *xhci)
141 : {
142 2 : u32 command;
143 2 : u32 state;
144 2 : int ret;
145 2 :
146 6 : state = xhci_readl(xhci, &xhci->op_regs->status);
147 6 : if ((state & STS_HALT) == 0) {
148 18 : xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
149 4 : return 0;
150 2 : }
151 :
152 : xhci_dbg(xhci, "// Reset the HC\n");
153 4 : command = xhci_readl(xhci, &xhci->op_regs->command);
154 2 : command |= CMD_RESET;
155 4 : xhci_writel(xhci, command, &xhci->op_regs->command);
156 : /* XXX: Why does EHCI set this here? Shouldn't other code do this? */
157 6 : xhci_to_hcd(xhci)->state = HC_STATE_HALT;
158 :
159 4 : ret = handshake(xhci, &xhci->op_regs->command,
160 : CMD_RESET, 0, 250 * 1000);
161 4 : if (ret)
162 2 : return ret;
163 :
164 : xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
165 : /*
166 : * xHCI cannot write to any doorbells or operational registers other
167 : * than status until the "Controller Not Ready" flag is cleared.
168 : */
169 6 : return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
170 : }
171 :
172 :
173 : #if 0
174 : /* Set up MSI-X table for entry 0 (may claim other entries later) */
175 : static int xhci_setup_msix(struct xhci_hcd *xhci)
176 : {
177 : int ret;
178 : struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
179 :
180 : xhci->msix_count = 0;
181 : /* XXX: did I do this right? ixgbe does kcalloc for more than one */
182 : xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
183 : if (!xhci->msix_entries) {
184 : xhci_err(xhci, "Failed to allocate MSI-X entries\n");
185 : return -ENOMEM;
186 : }
187 : xhci->msix_entries[0].entry = 0;
188 :
189 : ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
190 : if (ret) {
191 : xhci_err(xhci, "Failed to enable MSI-X\n");
192 : goto free_entries;
193 : }
194 :
195 : /*
196 : * Pass the xhci pointer value as the request_irq "cookie".
197 : * If more irqs are added, this will need to be unique for each one.
198 : */
199 : ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
200 : "xHCI", xhci_to_hcd(xhci));
201 : if (ret) {
202 : xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
203 : goto disable_msix;
204 : }
205 : xhci_dbg(xhci, "Finished setting up MSI-X\n");
206 : return 0;
207 :
208 : disable_msix:
209 : pci_disable_msix(pdev);
210 : free_entries:
211 : kfree(xhci->msix_entries);
212 : xhci->msix_entries = NULL;
213 : return ret;
214 : }
215 :
216 : /* XXX: code duplication; can xhci_setup_msix call this? */
217 : /* Free any IRQs and disable MSI-X */
218 : static void xhci_cleanup_msix(struct xhci_hcd *xhci)
219 : {
220 : struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
221 : if (!xhci->msix_entries)
222 : return;
223 :
224 : free_irq(xhci->msix_entries[0].vector, xhci);
225 : pci_disable_msix(pdev);
226 : kfree(xhci->msix_entries);
227 : xhci->msix_entries = NULL;
228 : xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
229 : }
230 : #endif
231 :
232 : /*
233 : * Initialize memory for HCD and xHC (one-time init).
234 : *
235 : * Program the PAGESIZE register, initialize the device context array, create
236 : * device contexts (?), set up a command ring segment (or two?), create event
237 : * ring (one for now).
238 : */
239 : int xhci_init(struct usb_hcd *hcd)
240 : {
241 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
242 2 : int retval = 0;
243 1 :
244 1 : xhci_dbg(xhci, "xhci_init\n");
245 4 : spin_lock_init(&xhci->lock);
246 2 : if (link_quirk) {
247 : xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
248 1 : xhci->quirks |= XHCI_LINK_TRB_QUIRK;
249 : } else {
250 : xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
251 : }
252 3 : retval = xhci_mem_init(xhci, GFP_KERNEL);
253 : xhci_dbg(xhci, "Finished xhci_init\n");
254 :
255 1 : return retval;
256 : }
257 :
258 : /*
259 : * Called in interrupt context when there might be work
260 : * queued on the event ring
261 : *
262 : * xhci->lock must be held by caller.
263 : */
264 : static void xhci_work(struct xhci_hcd *xhci)
265 : {
266 1 : u32 temp;
267 1 : u64 temp_64;
268 :
269 : /*
270 : * Clear the op reg interrupt status first,
271 : * so we can receive interrupts from other MSI-X interrupters.
272 : * Write 1 to clear the interrupt status.
273 : */
274 2 : temp = xhci_readl(xhci, &xhci->op_regs->status);
275 1 : temp |= STS_EINT;
276 2 : xhci_writel(xhci, temp, &xhci->op_regs->status);
277 : /* FIXME when MSI-X is supported and there are multiple vectors */
278 : /* Clear the MSI-X event interrupt status */
279 :
280 : /* Acknowledge the interrupt */
281 2 : temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
282 1 : temp |= 0x3;
283 2 : xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
284 : /* Flush posted writes */
285 2 : xhci_readl(xhci, &xhci->ir_set->irq_pending);
286 :
287 3 : if (xhci->xhc_state & XHCI_STATE_DYING)
288 : xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
289 : "Shouldn't IRQs be disabled?\n");
290 : else
291 : /* FIXME this should be a delayed service routine
292 : * that clears the EHB.
293 : */
294 6 : xhci_handle_event(xhci);
295 :
296 : /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
297 4 : temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
298 2 : xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
299 : /* Flush posted writes -- FIXME is this necessary? */
300 2 : xhci_readl(xhci, &xhci->ir_set->irq_pending);
301 1 : }
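
/*
 * A small sketch of the write-1-to-clear (W1C) register semantics the
 * function above depends on twice: STS_EINT in the status register and the
 * ERST_EHB bit are both acknowledged by writing a 1 back to them. The
 * simulated register below is an ordinary variable; a real status register
 * is MMIO and is read back to flush posted writes, as done above.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t w1c_write(uint32_t reg, uint32_t val)
{
	return reg & ~val;	/* every 1 written clears that bit */
}

int main(void)
{
	uint32_t status = 1u << 3;		/* assume STS_EINT is bit 3 */

	status = w1c_write(status, 1u << 3);	/* acknowledge the interrupt */
	assert(status == 0);
	status = w1c_write(status, 1u << 2);	/* other bits: no effect */
	assert(status == 0);
	return 0;
}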
302 :
303 : /*-------------------------------------------------------------------------*/
304 :
305 : /*
306 : * xHCI spec says we can get an interrupt, and if the HC has an error condition,
307 : * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
308 : * indicators of an event TRB error, but we check the status *first* to be safe.
309 : */
310 : irqreturn_t xhci_irq(struct usb_hcd *hcd)
311 : {
312 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
313 1 : u32 temp, temp2;
314 1 : union xhci_trb *trb;
315 1 :
316 3 : spin_lock(&xhci->lock);
317 2 : trb = xhci->event_ring->dequeue;
318 1 : /* Check if the xHC generated the interrupt, or the irq is shared */
319 3 : temp = xhci_readl(xhci, &xhci->op_regs->status);
320 3 : temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
321 5 : if (temp == 0xffffffff && temp2 == 0xffffffff)
322 1 : goto hw_died;
323 :
324 4 : if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
325 2 : spin_unlock(&xhci->lock);
326 1 : return IRQ_NONE;
327 : }
328 : xhci_dbg(xhci, "op reg status = %08x\n", temp);
329 : xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
330 : xhci_dbg(xhci, "Event ring dequeue ptr:\n");
331 : xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
332 : (unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
333 : lower_32_bits(trb->link.segment_ptr),
334 : upper_32_bits(trb->link.segment_ptr),
335 : (unsigned int) trb->link.intr_target,
336 : (unsigned int) trb->link.control);
337 :
338 2 : if (temp & STS_FATAL) {
339 8 : xhci_warn(xhci, "WARNING: Host System Error\n");
340 2 : xhci_halt(xhci);
341 : hw_died:
342 6 : xhci_to_hcd(xhci)->state = HC_STATE_HALT;
343 2 : spin_unlock(&xhci->lock);
344 1 : return IRQ_HANDLED;
345 : }
346 :
347 2 : xhci_work(xhci);
348 2 : spin_unlock(&xhci->lock);
349 :
350 1 : return IRQ_HANDLED;
351 : }
352 :
353 : #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
354 : void xhci_event_ring_work(unsigned long arg)
355 : {
356 : unsigned long flags;
357 : int temp;
358 : u64 temp_64;
359 : struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
360 : int i, j;
361 :
362 : xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
363 :
364 : spin_lock_irqsave(&xhci->lock, flags);
365 : temp = xhci_readl(xhci, &xhci->op_regs->status);
366 : xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
367 : if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
368 : xhci_dbg(xhci, "HW died, polling stopped.\n");
369 : spin_unlock_irqrestore(&xhci->lock, flags);
370 : return;
371 : }
372 :
373 : temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
374 : xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
375 : xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
376 : xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
377 : xhci->error_bitmask = 0;
378 : xhci_dbg(xhci, "Event ring:\n");
379 : xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
380 : xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
381 : temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
382 : temp_64 &= ~ERST_PTR_MASK;
383 : xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
384 : xhci_dbg(xhci, "Command ring:\n");
385 : xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
386 : xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
387 : xhci_dbg_cmd_ptrs(xhci);
388 : for (i = 0; i < MAX_HC_SLOTS; ++i) {
389 : if (!xhci->devs[i])
390 : continue;
391 : for (j = 0; j < 31; ++j) {
392 : struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
393 : if (!ring)
394 : continue;
395 : xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
396 : xhci_debug_segment(xhci, ring->deq_seg);
397 : }
398 : }
399 :
400 : if (xhci->noops_submitted != NUM_TEST_NOOPS)
401 : if (xhci_setup_one_noop(xhci))
402 : xhci_ring_cmd_db(xhci);
403 : spin_unlock_irqrestore(&xhci->lock, flags);
404 :
405 : if (!xhci->zombie)
406 : mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
407 : else
408 : xhci_dbg(xhci, "Quit polling the event ring.\n");
409 : }
410 : #endif
411 :
412 : /*
413 : * Start the HC after it was halted.
414 : *
415 : * This function is called by the USB core when the HC driver is added.
416 : * Its opposite is xhci_stop().
417 : *
418 : * xhci_init() must be called once before this function can be called.
419 : * Reset the HC, enable device slot contexts, program DCBAAP, and
420 : * set command ring pointer and event ring pointer.
421 : *
422 : * Setup MSI-X vectors and enable interrupts.
423 : */
424 : int xhci_run(struct usb_hcd *hcd)
425 : {
426 1 : u32 temp;
427 1 : u64 temp_64;
428 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
429 2 : void (*doorbell)(struct xhci_hcd *) = NULL;
430 1 :
431 2 : hcd->uses_new_polling = 1;
432 1 : hcd->poll_rh = 0;
433 :
434 : xhci_dbg(xhci, "xhci_run\n");
435 : #if 0 /* FIXME: MSI not setup yet */
436 : /* Do this at the very last minute */
437 : ret = xhci_setup_msix(xhci);
438 : if (!ret)
439 : return ret;
440 :
441 : return -ENOSYS;
442 : #endif
443 : #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
444 : init_timer(&xhci->event_ring_timer);
445 : xhci->event_ring_timer.data = (unsigned long) xhci;
446 : xhci->event_ring_timer.function = xhci_event_ring_work;
447 : /* Poll the event ring */
448 : xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
449 : xhci->zombie = 0;
450 : xhci_dbg(xhci, "Setting event ring polling timer\n");
451 : add_timer(&xhci->event_ring_timer);
452 : #endif
453 :
454 : xhci_dbg(xhci, "Command ring memory map follows:\n");
455 3 : xhci_debug_ring(xhci, xhci->cmd_ring);
456 2 : xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
457 2 : xhci_dbg_cmd_ptrs(xhci);
458 :
459 : xhci_dbg(xhci, "ERST memory map follows:\n");
460 2 : xhci_dbg_erst(xhci, &xhci->erst);
461 : xhci_dbg(xhci, "Event ring:\n");
462 3 : xhci_debug_ring(xhci, xhci->event_ring);
463 2 : xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
464 2 : temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
465 1 : temp_64 &= ~ERST_PTR_MASK;
466 : xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
467 :
468 : xhci_dbg(xhci, "// Set the interrupt modulation register\n");
469 2 : temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
470 1 : temp &= ~ER_IRQ_INTERVAL_MASK;
471 1 : temp |= (u32) 160;
472 2 : xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
473 :
474 : /* Set the HCD state before we enable the irqs */
475 1 : hcd->state = HC_STATE_RUNNING;
476 2 : temp = xhci_readl(xhci, &xhci->op_regs->command);
477 1 : temp |= (CMD_EIE);
478 : xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
479 : temp);
480 2 : xhci_writel(xhci, temp, &xhci->op_regs->command);
481 :
482 2 : temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
483 : xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
484 : xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
485 2 : xhci_writel(xhci, ER_IRQ_ENABLE(temp),
486 : &xhci->ir_set->irq_pending);
487 3 : xhci_print_ir_set(xhci, xhci->ir_set, 0);
488 :
489 : if (NUM_TEST_NOOPS > 0)
490 : doorbell = xhci_setup_one_noop(xhci);
491 :
492 5 : if (xhci_start(xhci)) {
493 2 : xhci_halt(xhci);
494 1 : return -ENODEV;
495 : }
496 :
497 : xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
498 2 : if (doorbell)
499 2 : (*doorbell)(xhci);
500 :
501 : xhci_dbg(xhci, "Finished xhci_run\n");
502 1 : return 0;
503 : }
504 :
505 : /*
506 : * Stop xHCI driver.
507 : *
508 : * This function is called by the USB core when the HC driver is removed.
509 : * Its opposite is xhci_run().
510 : *
511 : * Disable device contexts, disable IRQs, and quiesce the HC.
512 : * Reset the HC, finish any completed transactions, and cleanup memory.
513 : */
514 : void xhci_stop(struct usb_hcd *hcd)
515 : {
516 1 : u32 temp;
517 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
518 1 :
519 2 : spin_lock_irq(&xhci->lock);
520 2 : xhci_halt(xhci);
521 4 : xhci_reset(xhci);
522 2 : spin_unlock_irq(&xhci->lock);
523 :
524 : #if 0 /* No MSI yet */
525 : xhci_cleanup_msix(xhci);
526 : #endif
527 : #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
528 : /* Tell the event ring poll function not to reschedule */
529 : xhci->zombie = 1;
530 : del_timer_sync(&xhci->event_ring_timer);
531 : #endif
532 :
533 : xhci_dbg(xhci, "// Disabling event ring interrupts\n");
534 2 : temp = xhci_readl(xhci, &xhci->op_regs->status);
535 2 : xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
536 2 : temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
537 2 : xhci_writel(xhci, ER_IRQ_DISABLE(temp),
538 : &xhci->ir_set->irq_pending);
539 3 : xhci_print_ir_set(xhci, xhci->ir_set, 0);
540 :
541 : xhci_dbg(xhci, "cleaning up memory\n");
542 2 : xhci_mem_cleanup(xhci);
543 : xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
544 : xhci_readl(xhci, &xhci->op_regs->status));
545 1 : }
546 :
547 : /*
548 : * Shutdown HC (not bus-specific)
549 : *
550 : * This is called when the machine is rebooting or halting. We assume that the
551 : * machine will be powered off, and the HC's internal state will be reset.
552 : * Don't bother to free memory.
553 : */
554 : void xhci_shutdown(struct usb_hcd *hcd)
555 : {
556 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
557 1 :
558 2 : spin_lock_irq(&xhci->lock);
559 2 : xhci_halt(xhci);
560 2 : spin_unlock_irq(&xhci->lock);
561 :
562 : #if 0
563 : xhci_cleanup_msix(xhci);
564 : #endif
565 :
566 : xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
567 : xhci_readl(xhci, &xhci->op_regs->status));
568 1 : }
569 :
570 : /*-------------------------------------------------------------------------*/
571 :
572 : /**
573 : * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
574 : * HCDs. Find the index for an endpoint given its descriptor. Use the return
575 : * value to left shift 1 for the bitmask.
576 : *
577 : * Index = (epnum * 2) + direction - 1,
578 : * where direction = 0 for OUT, 1 for IN.
579 : * For control endpoints, the IN index is used (OUT index is unused), so
580 : * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
581 : */
582 : unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
583 : {
584 13 : unsigned int index;
585 65 : if (usb_endpoint_xfer_control(desc))
586 52 : index = (unsigned int) (usb_endpoint_num(desc)*2);
587 13 : else
588 143 : index = (unsigned int) (usb_endpoint_num(desc)*2) +
589 : (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
590 26 : return index;
591 : }
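
/*
 * A standalone sketch of the same arithmetic, decoding the raw
 * bEndpointAddress byte instead of calling the usb_endpoint_*() helpers
 * (is_control is a separate parameter because the transfer type lives in
 * bmAttributes, not in the address byte). Worked examples: ep 1 IN (0x81)
 * gives 1*2 + 1 - 1 = 2; ep 2 OUT (0x02) gives 2*2 + 0 - 1 = 3; the
 * default control endpoint gives 0*2 = 0.
 */
#include <assert.h>

static unsigned int ep_index_from_address(unsigned char bEndpointAddress,
					  int is_control)
{
	unsigned int epnum = bEndpointAddress & 0x0f;	/* bits 3:0 */
	unsigned int dir_in = (bEndpointAddress & 0x80) ? 1 : 0;

	if (is_control)
		return epnum * 2;
	return epnum * 2 + dir_in - 1;
}

int main(void)
{
	assert(ep_index_from_address(0x81, 0) == 2);	/* ep 1 IN */
	assert(ep_index_from_address(0x02, 0) == 3);	/* ep 2 OUT */
	assert(ep_index_from_address(0x00, 1) == 0);	/* ep 0 */
	return 0;
}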
592 :
593 : /* Find the flag for this endpoint (for use in the control context). Use the
594 : * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
595 : * bit 1, etc.
596 : */
597 : unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
598 : {
599 30 : return 1 << (xhci_get_endpoint_index(desc) + 1);
600 : }
601 :
602 : /* Find the flag for this endpoint (for use in the control context). Use the
603 : * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
604 : * bit 1, etc.
605 : */
606 : unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
607 : {
608 4 : return 1 << (ep_index + 1);
609 : }
610 :
611 : /* Compute the last valid endpoint context index. Basically, this is the
612 : * endpoint index plus one. For slot contexts with more than one valid endpoint,
613 : * we find the most significant bit set in the added contexts flags.
614 : * e.g. ep 1 IN (with endpoint address 0x81) => added_ctxs = 0b1000
615 : * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
616 : */
617 : unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
618 : {
619 28 : return fls(added_ctxs) - 1;
620 : }
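
/*
 * How the three helpers above round-trip, using the ep 1 IN example from
 * the comment: endpoint index 2 -> flag 1 << (2 + 1) = 0b1000 ->
 * fls(0b1000) - 1 = 3, i.e. the index plus one, because the slot context
 * occupies bit 0 of the flag word. my_fls() is a portable stand-in for the
 * kernel's fls().
 */
#include <assert.h>

static int my_fls(unsigned int x)
{
	int r = 0;

	while (x) {		/* position of the most significant set bit */
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int ep_index = 2;		/* ep 1 IN, as above */
	unsigned int flag = 1u << (ep_index + 1);

	assert(flag == 0x8);			/* 0b1000 */
	assert(my_fls(flag) - 1 == ep_index + 1);
	return 0;
}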
621 :
622 : /* Returns 1 if the arguments are OK;
623 : * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
624 : */
625 : int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
626 : struct usb_host_endpoint *ep, int check_ep, const char *func) {
627 40 : if (!hcd || (check_ep && !ep) || !udev) {
628 5 : printk(KERN_DEBUG "xHCI %s called with invalid args\n",
629 : func);
630 5 : return -EINVAL;
631 : }
632 15 : if (!udev->parent) {
633 5 : printk(KERN_DEBUG "xHCI %s called for root hub\n",
634 : func);
635 5 : return 0;
636 : }
637 10 : if (!udev->slot_id) {
638 5 : printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
639 : func);
640 5 : return -EINVAL;
641 : }
642 5 : return 1;
643 : }
644 :
645 : static int xhci_configure_endpoint(struct xhci_hcd *xhci,
646 : struct usb_device *udev, struct xhci_command *command,
647 : bool ctx_change, bool must_succeed);
648 :
649 : /*
650 : * Full speed devices may have a max packet size greater than 8 bytes, but the
651 : * USB core doesn't know that until it reads the first 8 bytes of the
652 : * descriptor. If the usb_device's max packet size changes after that point,
653 : * we need to issue an evaluate context command and wait on it.
654 : */
655 : static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
656 : unsigned int ep_index, struct urb *urb)
657 : {
658 1 : struct xhci_container_ctx *in_ctx;
659 1 : struct xhci_container_ctx *out_ctx;
660 1 : struct xhci_input_control_ctx *ctrl_ctx;
661 1 : struct xhci_ep_ctx *ep_ctx;
662 1 : int max_packet_size;
663 1 : int hw_max_packet_size;
664 2 : int ret = 0;
665 :
666 1 : out_ctx = xhci->devs[slot_id]->out_ctx;
667 2 : ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
668 1 : hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
669 1 : max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
670 2 : if (hw_max_packet_size != max_packet_size) {
671 : xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
672 : xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
673 : max_packet_size);
674 : xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
675 : hw_max_packet_size);
676 : xhci_dbg(xhci, "Issuing evaluate context command.\n");
677 :
678 : /* Set up the modified control endpoint 0 */
679 2 : xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
680 : xhci->devs[slot_id]->out_ctx, ep_index);
681 1 : in_ctx = xhci->devs[slot_id]->in_ctx;
682 2 : ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
683 1 : ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
684 1 : ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
685 :
686 : /* Set up the input context flags for the command */
687 : /* FIXME: This won't work if a non-default control endpoint
688 : * changes max packet sizes.
689 : */
690 2 : ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
691 1 : ctrl_ctx->add_flags = EP0_FLAG;
692 1 : ctrl_ctx->drop_flags = 0;
693 :
694 : xhci_dbg(xhci, "Slot %d input context\n", slot_id);
695 2 : xhci_dbg_ctx(xhci, in_ctx, ep_index);
696 : xhci_dbg(xhci, "Slot %d output context\n", slot_id);
697 2 : xhci_dbg_ctx(xhci, out_ctx, ep_index);
698 :
699 5 : ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
700 : true, false);
701 :
702 : /* Clean up the input context for later use by bandwidth
703 : * functions.
704 : */
705 1 : ctrl_ctx->add_flags = SLOT_FLAG;
706 : }
707 2 : return ret;
708 : }
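
/*
 * A sketch of the bitfield surgery performed on ep_info2 above, with
 * hypothetical FAKE_* shift/mask values standing in for the MAX_PACKET*
 * macros in xhci.h: because the max packet size is one field of a packed
 * 32-bit context word, updating it means masking the old value out and
 * OR-ing the new one in, the same two-step sequence used above.
 */
#include <assert.h>
#include <stdint.h>

#define FAKE_MAX_PACKET_SHIFT	16
#define FAKE_MAX_PACKET_MASK	(0xffffu << FAKE_MAX_PACKET_SHIFT)
#define FAKE_MAX_PACKET(p)	((uint32_t)(p) << FAKE_MAX_PACKET_SHIFT)
#define FAKE_MAX_PACKET_DECODED(x) (((x) >> FAKE_MAX_PACKET_SHIFT) & 0xffff)

int main(void)
{
	/* enumerated assuming the FS minimum of 8 bytes for ep 0 */
	uint32_t ep_info2 = FAKE_MAX_PACKET(8);

	ep_info2 &= ~FAKE_MAX_PACKET_MASK;	/* drop the stale field */
	ep_info2 |= FAKE_MAX_PACKET(64);	/* install the real size */
	assert(FAKE_MAX_PACKET_DECODED(ep_info2) == 64);
	return 0;
}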
709 :
710 : /*
711 : * non-error returns are a promise to giveback() the urb later
712 : * we drop ownership so next owner (or urb unlink) can get it
713 : */
714 : int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
715 : {
716 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
717 1 : unsigned long flags;
718 2 : int ret = 0;
719 1 : unsigned int slot_id, ep_index;
720 1 :
721 1 :
722 7 : if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
723 3 : return -EINVAL;
724 1 :
725 3 : slot_id = urb->dev->slot_id;
726 4 : ep_index = xhci_get_endpoint_index(&urb->ep->desc);
727 1 :
728 5 : if (!xhci->devs || !xhci->devs[slot_id]) {
729 6 : if (!in_interrupt())
730 5 : dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
731 3 : ret = -EINVAL;
732 3 : goto exit;
733 1 : }
734 4 : if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
735 2 : if (!in_interrupt())
736 : xhci_dbg(xhci, "urb submitted during PCI suspend\n");
737 1 : ret = -ESHUTDOWN;
738 1 : goto exit;
739 : }
740 4 : if (usb_endpoint_xfer_control(&urb->ep->desc)) {
741 : /* Check to see if the max packet size for the default control
742 : * endpoint changed during FS device enumeration
743 : */
744 3 : if (urb->dev->speed == USB_SPEED_FULL) {
745 3 : ret = xhci_check_maxpacket(xhci, slot_id,
746 : ep_index, urb);
747 2 : if (ret < 0)
748 1 : return ret;
749 : }
750 :
751 : /* We have a spinlock and interrupts disabled, so we must pass
752 : * atomic context to this function, which may allocate memory.
753 : */
754 5 : spin_lock_irqsave(&xhci->lock, flags);
755 3 : if (xhci->xhc_state & XHCI_STATE_DYING)
756 1 : goto dying;
757 4 : ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
758 : slot_id, ep_index);
759 2 : spin_unlock_irqrestore(&xhci->lock, flags);
760 4 : } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
761 3 : spin_lock_irqsave(&xhci->lock, flags);
762 3 : if (xhci->xhc_state & XHCI_STATE_DYING)
763 1 : goto dying;
764 4 : ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
765 : slot_id, ep_index);
766 2 : spin_unlock_irqrestore(&xhci->lock, flags);
767 4 : } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
768 3 : spin_lock_irqsave(&xhci->lock, flags);
769 3 : if (xhci->xhc_state & XHCI_STATE_DYING)
770 1 : goto dying;
771 2 : ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
772 : slot_id, ep_index);
773 2 : spin_unlock_irqrestore(&xhci->lock, flags);
774 : } else {
775 1 : ret = -EINVAL;
776 : }
777 : exit:
778 7 : return ret;
779 7 : dying:
780 : xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
781 : "non-responsive xHCI host.\n",
782 : urb->ep->desc.bEndpointAddress, urb);
783 6 : spin_unlock_irqrestore(&xhci->lock, flags);
784 1 : return -ESHUTDOWN;
785 : }
786 :
787 : /*
788 : * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
789 : * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
790 : * should pick up where it left off in the TD, unless a Set Transfer Ring
791 : * Dequeue Pointer is issued.
792 : *
793 : * The TRBs that make up the buffers for the canceled URB will be "removed" from
794 : * the ring. Since the ring is a contiguous structure, they can't be physically
795 : * removed. Instead, there are two options (and a third case to handle):
796 : *
797 : * 1) If the HC is in the middle of processing the URB to be canceled, we
798 : * simply move the ring's dequeue pointer past those TRBs using the Set
799 : * Transfer Ring Dequeue Pointer command. This will be the common case,
800 : * when drivers timeout on the last submitted URB and attempt to cancel.
801 : *
802 : * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
803 : * series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
804 : * HC will need to invalidate any TRBs it has cached after the stop
805 : * endpoint command, as noted in the xHCI 0.95 errata.
806 : *
807 : * 3) The TD may have completed by the time the Stop Endpoint Command
808 : * completes, so software needs to handle that case too.
809 : *
810 : * This function should protect against the TD enqueueing code ringing the
811 : * doorbell while this code is waiting for a Stop Endpoint command to complete.
812 : * It also needs to account for multiple cancellations happening at the same
813 : * time for the same endpoint.
814 : *
815 : * Note that this function can be called in any context, or so says
816 : * usb_hcd_unlink_urb()
817 : */
818 : int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
819 : {
820 1 : unsigned long flags;
821 1 : int ret;
822 1 : u32 temp;
823 1 : struct xhci_hcd *xhci;
824 1 : struct xhci_td *td;
825 1 : unsigned int ep_index;
826 1 : struct xhci_ring *ep_ring;
827 1 : struct xhci_virt_ep *ep;
828 1 :
829 3 : xhci = hcd_to_xhci(hcd);
830 3 : spin_lock_irqsave(&xhci->lock, flags);
831 : /* Make sure the URB hasn't completed or been unlinked already */
832 1 : ret = usb_hcd_check_unlink_urb(hcd, urb, status);
833 5 : if (ret || !urb->hcpriv)
834 1 : goto done;
835 2 : temp = xhci_readl(xhci, &xhci->op_regs->status);
836 2 : if (temp == 0xffffffff) {
837 : xhci_dbg(xhci, "HW died, freeing TD.\n");
838 2 : td = (struct xhci_td *) urb->hcpriv;
839 :
840 1 : usb_hcd_unlink_urb_from_ep(hcd, urb);
841 2 : spin_unlock_irqrestore(&xhci->lock, flags);
842 3 : usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
843 1 : kfree(td);
844 1 : return ret;
845 : }
846 3 : if (xhci->xhc_state & XHCI_STATE_DYING) {
847 : xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
848 : "non-responsive xHCI host.\n",
849 : urb->ep->desc.bEndpointAddress, urb);
850 : /* Let the stop endpoint command watchdog timer (which set this
851 : * state) finish cleaning up the endpoint TD lists. We must
852 : * have caught it in the middle of dropping a lock and giving
853 : * back an URB.
854 : */
855 1 : goto done;
856 : }
857 :
858 : xhci_dbg(xhci, "Cancel URB %p\n", urb);
859 : xhci_dbg(xhci, "Event ring:\n");
860 3 : xhci_debug_ring(xhci, xhci->event_ring);
861 3 : ep_index = xhci_get_endpoint_index(&urb->ep->desc);
862 1 : ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
863 1 : ep_ring = ep->ring;
864 : xhci_dbg(xhci, "Endpoint ring:\n");
865 3 : xhci_debug_ring(xhci, ep_ring);
866 2 : td = (struct xhci_td *) urb->hcpriv;
867 :
868 2 : list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
869 : /* Queue a stop endpoint command, but only if this is
870 : * the first cancellation to be handled.
871 : */
872 2 : if (!(ep->ep_state & EP_HALT_PENDING)) {
873 1 : ep->ep_state |= EP_HALT_PENDING;
874 1 : ep->stop_cmds_pending++;
875 1 : ep->stop_cmd_timer.expires = jiffies +
876 : XHCI_STOP_EP_CMD_TIMEOUT * HZ;
877 1 : add_timer(&ep->stop_cmd_timer);
878 2 : xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
879 2 : xhci_ring_cmd_db(xhci);
880 : }
881 : done:
882 8 : spin_unlock_irqrestore(&xhci->lock, flags);
883 1 : return ret;
884 2 : }
885 :
886 : /* Drop an endpoint from a new bandwidth configuration for this device.
887 : * Only one call to this function is allowed per endpoint before
888 : * check_bandwidth() or reset_bandwidth() must be called.
889 : * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
890 : * add the endpoint to the schedule with possibly new parameters denoted by a
891 : * different endpoint descriptor in usb_host_endpoint.
892 : * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
893 : * not allowed.
894 : *
895 : * The USB core will not allow URBs to be queued to an endpoint that is being
896 : * disabled, so there's no need for mutual exclusion to protect
897 : * the xhci->devs[slot_id] structure.
898 : */
899 : int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
900 : struct usb_host_endpoint *ep)
901 1 : {
902 1 : struct xhci_hcd *xhci;
903 1 : struct xhci_container_ctx *in_ctx, *out_ctx;
904 1 : struct xhci_input_control_ctx *ctrl_ctx;
905 1 : struct xhci_slot_ctx *slot_ctx;
906 1 : unsigned int last_ctx;
907 1 : unsigned int ep_index;
908 1 : struct xhci_ep_ctx *ep_ctx;
909 1 : u32 drop_flag;
910 1 : u32 new_add_flags, new_drop_flags, new_slot_info;
911 1 : int ret;
912 1 :
913 3 : ret = xhci_check_args(hcd, udev, ep, 1, __func__);
914 3 : if (ret <= 0)
915 2 : return ret;
916 3 : xhci = hcd_to_xhci(hcd);
917 1 : xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
918 1 :
919 3 : drop_flag = xhci_get_endpoint_flag(&ep->desc);
920 3 : if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
921 1 : xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
922 1 : __func__, drop_flag);
923 1 : return 0;
924 : }
925 :
926 4 : if (!xhci->devs || !xhci->devs[udev->slot_id]) {
927 8 : xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
928 : __func__);
929 1 : return -EINVAL;
930 : }
931 :
932 1 : in_ctx = xhci->devs[udev->slot_id]->in_ctx;
933 1 : out_ctx = xhci->devs[udev->slot_id]->out_ctx;
934 2 : ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
935 3 : ep_index = xhci_get_endpoint_index(&ep->desc);
936 2 : ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
937 : /* If the HC already knows the endpoint is disabled,
938 : * or the HCD has noted it is disabled, ignore this request
939 : */
940 7 : if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
941 1 : ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
942 10 : xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
943 : __func__, ep);
944 1 : return 0;
945 : }
946 :
947 1 : ctrl_ctx->drop_flags |= drop_flag;
948 1 : new_drop_flags = ctrl_ctx->drop_flags;
949 :
950 1 : ctrl_ctx->add_flags &= ~drop_flag;
951 1 : new_add_flags = ctrl_ctx->add_flags;
952 :
953 2 : last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
954 2 : slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
955 : /* Update the last valid endpoint context, if we deleted the last one */
956 2 : if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
957 1 : slot_ctx->dev_info &= ~LAST_CTX_MASK;
958 1 : slot_ctx->dev_info |= LAST_CTX(last_ctx);
959 : }
960 1 : new_slot_info = slot_ctx->dev_info;
961 :
962 2 : xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
963 :
964 : xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
965 : (unsigned int) ep->desc.bEndpointAddress,
966 : udev->slot_id,
967 : (unsigned int) new_drop_flags,
968 : (unsigned int) new_add_flags,
969 : (unsigned int) new_slot_info);
970 1 : return 0;
971 : }
972 :
973 : /* Add an endpoint to a new possible bandwidth configuration for this device.
974 : * Only one call to this function is allowed per endpoint before
975 : * check_bandwidth() or reset_bandwidth() must be called.
976 : * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
977 : * add the endpoint to the schedule with possibly new parameters denoted by a
978 : * different endpoint descriptor in usb_host_endpoint.
979 : * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
980 : * not allowed.
981 : *
982 : * The USB core will not allow URBs to be queued to an endpoint until the
983 : * configuration or alt setting is installed in the device, so there's no need
984 : * for mutual exclusion to protect the xhci->devs[slot_id] structure.
985 : */
986 : int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
987 : struct usb_host_endpoint *ep)
988 1 : {
989 1 : struct xhci_hcd *xhci;
990 1 : struct xhci_container_ctx *in_ctx, *out_ctx;
991 1 : unsigned int ep_index;
992 1 : struct xhci_ep_ctx *ep_ctx;
993 1 : struct xhci_slot_ctx *slot_ctx;
994 1 : struct xhci_input_control_ctx *ctrl_ctx;
995 1 : u32 added_ctxs;
996 1 : unsigned int last_ctx;
997 1 : u32 new_add_flags, new_drop_flags, new_slot_info;
998 1 : struct xhci_virt_device *virt_dev;
999 2 : int ret = 0;
1000 1 :
1001 3 : ret = xhci_check_args(hcd, udev, ep, 1, __func__);
1002 3 : if (ret <= 0) {
1003 1 : /* So we won't queue a reset ep command for a root hub */
1004 2 : ep->hcpriv = NULL;
1005 2 : return ret;
1006 1 : }
1007 3 : xhci = hcd_to_xhci(hcd);
1008 1 :
1009 3 : added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1010 3 : last_ctx = xhci_last_valid_endpoint(added_ctxs);
1011 3 : if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1012 1 : /* FIXME when we have to issue an evaluate endpoint command to
1013 1 : * deal with ep0 max packet size changing once we get the
1014 1 : * descriptors
1015 1 : */
1016 1 : xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1017 : __func__, added_ctxs);
1018 1 : return 0;
1019 : }
1020 :
1021 4 : if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1022 8 : xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1023 : __func__);
1024 1 : return -EINVAL;
1025 : }
1026 :
1027 1 : virt_dev = xhci->devs[udev->slot_id];
1028 1 : in_ctx = virt_dev->in_ctx;
1029 1 : out_ctx = virt_dev->out_ctx;
1030 2 : ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1031 3 : ep_index = xhci_get_endpoint_index(&ep->desc);
1032 2 : ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1033 :
1034 : /* If this endpoint is already in use, and the upper layers are trying
1035 : * to add it again without dropping it, reject the addition.
1036 : */
1037 6 : if (virt_dev->eps[ep_index].ring &&
1038 : !(le32_to_cpu(ctrl_ctx->drop_flags) &
1039 : xhci_get_endpoint_flag(&ep->desc))) {
1040 8 : xhci_warn(xhci, "Trying to add endpoint 0x%x "
1041 : "without dropping it.\n",
1042 : (unsigned int) ep->desc.bEndpointAddress);
1043 1 : return -EINVAL;
1044 : }
1045 :
1046 : /* If the HCD has already noted the endpoint is enabled,
1047 : * ignore this request.
1048 : */
1049 6 : if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
1050 8 : xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1051 : __func__, ep);
1052 1 : return 0;
1053 : }
1054 :
1055 : /*
1056 : * Configuration and alternate setting changes must be done in
1057 : * process context, not interrupt context (or so documentation
1058 : * for usb_set_interface() and usb_set_configuration() claim).
1059 : */
1060 5 : if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1061 : dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1062 : __func__, ep->desc.bEndpointAddress);
1063 1 : return -ENOMEM;
1064 : }
1065 :
1066 1 : ctrl_ctx->add_flags |= added_ctxs;
1067 1 : new_add_flags = ctrl_ctx->add_flags;
1068 :
1069 : /* If xhci_endpoint_disable() was called for this endpoint, but the
1070 : * xHC hasn't been notified yet through the check_bandwidth() call,
1071 : * this re-adds a new state for the endpoint from the new endpoint
1072 : * descriptors. We must drop and re-add this endpoint, so we leave the
1073 : * drop flags alone.
1074 : */
1075 1 : new_drop_flags = ctrl_ctx->drop_flags;
1076 :
1077 2 : slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1078 : /* Update the last valid endpoint context, if we just added one past */
1079 2 : if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
1080 1 : slot_ctx->dev_info &= ~LAST_CTX_MASK;
1081 1 : slot_ctx->dev_info |= LAST_CTX(last_ctx);
1082 : }
1083 1 : new_slot_info = slot_ctx->dev_info;
1084 :
1085 : /* Store the usb_device pointer for later use */
1086 1 : ep->hcpriv = udev;
1087 :
1088 : xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1089 : (unsigned int) ep->desc.bEndpointAddress,
1090 : udev->slot_id,
1091 : (unsigned int) new_drop_flags,
1092 : (unsigned int) new_add_flags,
1093 : (unsigned int) new_slot_info);
1094 1 : return 0;
1095 : }
1096 :
1097 : static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1098 : {
1099 2 : struct xhci_input_control_ctx *ctrl_ctx;
1100 2 : struct xhci_ep_ctx *ep_ctx;
1101 2 : struct xhci_slot_ctx *slot_ctx;
1102 2 : int i;
1103 :
1104 : /* When a device's add flag and drop flag are zero, any subsequent
1105 : * configure endpoint command will leave that endpoint's state
1106 : * untouched. Make sure we don't leave any old state in the input
1107 : * endpoint contexts.
1108 : */
1109 4 : ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1110 2 : ctrl_ctx->drop_flags = 0;
1111 2 : ctrl_ctx->add_flags = 0;
1112 4 : slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1113 2 : slot_ctx->dev_info &= ~LAST_CTX_MASK;
1114 : /* Endpoint 0 is always valid */
1115 2 : slot_ctx->dev_info |= LAST_CTX(1);
1116 10 : for (i = 1; i < 31; ++i) {
1117 6 : ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1118 6 : ep_ctx->ep_info = 0;
1119 2 : ep_ctx->ep_info2 = 0;
1120 2 : ep_ctx->deq = 0;
1121 2 : ep_ctx->tx_info = 0;
1122 : }
1123 2 : }
1124 :
1125 : static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1126 : struct usb_device *udev, int *cmd_status)
1127 : {
1128 4 : int ret;
1129 4 :
1130 4 : switch (*cmd_status) {
1131 16 : case COMP_ENOMEM:
1132 20 : dev_warn(&udev->dev, "Not enough host controller resources "
1133 4 : "for new device state.\n");
1134 8 : ret = -ENOMEM;
1135 4 : /* FIXME: can we allocate more resources for the HC? */
1136 8 : break;
1137 20 : case COMP_BW_ERR:
1138 20 : dev_warn(&udev->dev, "Not enough bandwidth "
1139 : "for new device state.\n");
1140 4 : ret = -ENOSPC;
1141 : /* FIXME: can we go back to the old state? */
1142 4 : break;
1143 16 : case COMP_TRB_ERR:
1144 : /* the HCD set up something wrong */
1145 16 : dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1146 : "add flag = 1, "
1147 : "and endpoint is not disabled.\n");
1148 4 : ret = -EINVAL;
1149 4 : break;
1150 16 : case COMP_SUCCESS:
1151 : dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1152 4 : ret = 0;
1153 4 : break;
1154 8 : default:
1155 36 : xhci_err(xhci, "ERROR: unexpected command completion "
1156 : "code 0x%x.\n", *cmd_status);
1157 4 : ret = -EINVAL;
1158 4 : break;
1159 : }
1160 20 : return ret;
1161 : }
1162 :
1163 : static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1164 : struct usb_device *udev, int *cmd_status)
1165 : {
1166 4 : int ret;
1167 8 : struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1168 4 :
1169 4 : switch (*cmd_status) {
1170 16 : case COMP_EINVAL:
1171 20 : dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1172 4 : "context command.\n");
1173 8 : ret = -EINVAL;
1174 8 : break;
1175 20 : case COMP_EBADSLT:
1176 20 : dev_warn(&udev->dev, "WARN: slot not enabled for"
1177 4 : "evaluate context command.\n");
1178 12 : case COMP_CTX_STATE:
1179 28 : dev_warn(&udev->dev, "WARN: invalid context state for "
1180 : "evaluate context command.\n");
1181 8 : xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1182 4 : ret = -EINVAL;
1183 4 : break;
1184 16 : case COMP_SUCCESS:
1185 : dev_dbg(&udev->dev, "Successful evaluate context command\n");
1186 4 : ret = 0;
1187 4 : break;
1188 8 : default:
1189 36 : xhci_err(xhci, "ERROR: unexpected command completion "
1190 : "code 0x%x.\n", *cmd_status);
1191 4 : ret = -EINVAL;
1192 4 : break;
1193 : }
1194 16 : return ret;
1195 : }
1196 :
1197 : /* Issue a configure endpoint command or evaluate context command
1198 : * and wait for it to finish.
1199 : */
1200 : static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1201 : struct usb_device *udev,
1202 : struct xhci_command *command,
1203 : bool ctx_change, bool must_succeed)
1204 4 : {
1205 4 : int ret;
1206 4 : int timeleft;
1207 4 : unsigned long flags;
1208 4 : struct xhci_container_ctx *in_ctx;
1209 4 : struct completion *cmd_completion;
1210 4 : int *cmd_status;
1211 4 : struct xhci_virt_device *virt_dev;
1212 4 :
1213 16 : spin_lock_irqsave(&xhci->lock, flags);
1214 8 : virt_dev = xhci->devs[udev->slot_id];
1215 12 : if (command) {
1216 8 : in_ctx = command->in_ctx;
1217 8 : cmd_completion = command->completion;
1218 8 : cmd_status = &command->status;
1219 4 : command->command_trb = xhci->cmd_ring->enqueue;
1220 8 : list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
1221 : } else {
1222 4 : in_ctx = virt_dev->in_ctx;
1223 4 : cmd_completion = &virt_dev->cmd_completion;
1224 4 : cmd_status = &virt_dev->cmd_status;
1225 : }
1226 16 : init_completion(cmd_completion);
1227 :
1228 8 : if (!ctx_change)
1229 12 : ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
1230 : udev->slot_id, must_succeed);
1231 : else
1232 12 : ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
1233 : udev->slot_id);
1234 16 : if (ret < 0) {
1235 16 : spin_unlock_irqrestore(&xhci->lock, flags);
1236 : xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
1237 4 : return -ENOMEM;
1238 : }
1239 16 : xhci_ring_cmd_db(xhci);
1240 8 : spin_unlock_irqrestore(&xhci->lock, flags);
1241 :
1242 : /* Wait for the configure endpoint command to complete */
1243 8 : timeleft = wait_for_completion_interruptible_timeout(
1244 : cmd_completion,
1245 : USB_CTRL_SET_TIMEOUT);
1246 8 : if (timeleft <= 0) {
1247 56 : xhci_warn(xhci, "%s while waiting for %s command\n",
1248 16 : timeleft == 0 ? "Timeout" : "Signal",
1249 : ctx_change == 0 ?
1250 : "configure endpoint" :
1251 : "evaluate context");
1252 : /* FIXME cancel the configure endpoint command */
1253 4 : return -ETIME;
1254 : }
1255 :
1256 8 : if (!ctx_change)
1257 28 : return xhci_configure_endpoint_result(xhci, udev, cmd_status);
1258 24 : return xhci_evaluate_context_result(xhci, udev, cmd_status);
1259 : }
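
/*
 * A user-space model of the submit-and-wait shape used above: one thread
 * (the "driver") queues a command, rings the doorbell, and blocks on a
 * completion that another thread (the "interrupt handler") signals. All of
 * the scaffolding below is hypothetical; the real code uses the kernel's
 * struct completion, and it is the xHC's command completion event, not a
 * thread, that wakes the waiter. Build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int status;
};

static struct fake_completion cmd = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
};

static void *fake_irq_handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cmd.lock);
	cmd.status = 0;			/* pretend COMP_SUCCESS came back */
	cmd.done = 1;
	pthread_cond_signal(&cmd.cond);
	pthread_mutex_unlock(&cmd.lock);
	return NULL;
}

int main(void)
{
	pthread_t irq;

	/* "queue the command and ring the doorbell" */
	pthread_create(&irq, NULL, fake_irq_handler, NULL);

	/* wait_for_completion_interruptible_timeout(), minus the timeout */
	pthread_mutex_lock(&cmd.lock);
	while (!cmd.done)
		pthread_cond_wait(&cmd.cond, &cmd.lock);
	pthread_mutex_unlock(&cmd.lock);

	printf("command completed, status %d\n", cmd.status);
	pthread_join(irq, NULL);
	return 0;
}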
1260 :
1261 : /* Called after one or more calls to xhci_add_endpoint() or
1262 : * xhci_drop_endpoint(). If this call fails, the USB core is expected
1263 : * to call xhci_reset_bandwidth().
1264 : *
1265 : * Since we are in the middle of changing either configuration or
1266 : * installing a new alt setting, the USB core won't allow URBs to be
1267 : * enqueued for any endpoint on the old config or interface. Nothing
1268 : * else should be touching the xhci->devs[slot_id] structure, so we
1269 : * don't need to take the xhci->lock for manipulating that.
1270 : */
1271 : int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1272 : {
1273 1 : int i;
1274 2 : int ret = 0;
1275 1 : struct xhci_hcd *xhci;
1276 1 : struct xhci_virt_device *virt_dev;
1277 1 : struct xhci_input_control_ctx *ctrl_ctx;
1278 1 : struct xhci_slot_ctx *slot_ctx;
1279 1 :
1280 3 : ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1281 3 : if (ret <= 0)
1282 2 : return ret;
1283 3 : xhci = hcd_to_xhci(hcd);
1284 :
1285 6 : if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
1286 8 : xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1287 : __func__);
1288 1 : return -EINVAL;
1289 : }
1290 : xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1291 1 : virt_dev = xhci->devs[udev->slot_id];
1292 :
1293 : /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
1294 2 : ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1295 1 : ctrl_ctx->add_flags |= SLOT_FLAG;
1296 1 : ctrl_ctx->add_flags &= ~EP0_FLAG;
1297 1 : ctrl_ctx->drop_flags &= ~SLOT_FLAG;
1298 1 : ctrl_ctx->drop_flags &= ~EP0_FLAG;
1299 : xhci_dbg(xhci, "New Input Control Context:\n");
1300 2 : slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1301 2 : xhci_dbg_ctx(xhci, virt_dev->in_ctx,
1302 : LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1303 :
1304 5 : ret = xhci_configure_endpoint(xhci, udev, NULL,
1305 : false, false);
1306 2 : if (ret) {
1307 : /* Callee should call reset_bandwidth() */
1308 1 : return ret;
1309 : }
1310 :
1311 : xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
1312 2 : xhci_dbg_ctx(xhci, virt_dev->out_ctx,
1313 : LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1314 :
1315 2 : xhci_zero_in_ctx(xhci, virt_dev);
1316 : /* Install new rings and free or cache any old rings */
1317 6 : for (i = 1; i < 31; ++i) {
1318 3 : int rings_cached;
1319 1 :
1320 2 : if (!virt_dev->eps[i].new_ring)
1321 1 : continue;
1322 : /* Only cache or free the old ring if it exists.
1323 : * It may not if this is the first add of an endpoint.
1324 : */
1325 2 : if (virt_dev->eps[i].ring) {
1326 1 : rings_cached = virt_dev->num_rings_cached;
1327 2 : if (rings_cached < XHCI_MAX_RINGS_CACHED) {
1328 1 : virt_dev->ring_cache[rings_cached] =
1329 : virt_dev->eps[i].ring;
1330 1 : virt_dev->num_rings_cached++;
1331 1 : rings_cached = virt_dev->num_rings_cached;
1332 : xhci_dbg(xhci, "Cached old ring, "
1333 : "%d ring%s cached\n",
1334 : rings_cached,
1335 : (rings_cached > 1) ? "s" : "");
1336 : } else {
1337 3 : xhci_ring_free(xhci, virt_dev->eps[i].ring);
1338 : xhci_dbg(xhci, "Ring cache full (%d rings), "
1339 : "freeing ring\n",
1340 : virt_dev->num_rings_cached);
1341 : }
1342 : }
1343 2 : virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1344 2 : virt_dev->eps[i].new_ring = NULL;
1345 2 : }
1346 :
1347 1 : return ret;
1348 : }
1349 :
1350 : void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1351 : {
1352 1 : struct xhci_hcd *xhci;
1353 1 : struct xhci_virt_device *virt_dev;
1354 1 : int i, ret;
1355 1 :
1356 3 : ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1357 3 : if (ret <= 0)
1358 2 : return;
1359 3 : xhci = hcd_to_xhci(hcd);
1360 :
1361 4 : if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1362 8 : xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1363 : __func__);
1364 1 : return;
1365 : }
1366 : xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1367 1 : virt_dev = xhci->devs[udev->slot_id];
1368 : /* Free any rings allocated for added endpoints */
1369 6 : for (i = 0; i < 31; ++i) {
1370 5 : if (virt_dev->eps[i].new_ring) {
1371 4 : xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
1372 1 : virt_dev->eps[i].new_ring = NULL;
1373 : }
1374 : }
1375 2 : xhci_zero_in_ctx(xhci, virt_dev);
1376 1 : }
1377 :
1378 : static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1379 : struct xhci_container_ctx *in_ctx,
1380 : struct xhci_container_ctx *out_ctx,
1381 : u32 add_flags, u32 drop_flags)
1382 4 : {
1383 4 : struct xhci_input_control_ctx *ctrl_ctx;
1384 8 : ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1385 4 : ctrl_ctx->add_flags = add_flags;
1386 4 : ctrl_ctx->drop_flags = drop_flags;
1387 8 : xhci_slot_copy(xhci, in_ctx, out_ctx);
1388 4 : ctrl_ctx->add_flags |= SLOT_FLAG;
1389 :
1390 : xhci_dbg(xhci, "Input Context:\n");
1391 16 : xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
1392 4 : }
1393 :
1394 : void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1395 : unsigned int slot_id, unsigned int ep_index,
1396 : struct xhci_dequeue_state *deq_state)
1397 4 : {
1398 4 : struct xhci_container_ctx *in_ctx;
1399 4 : struct xhci_ep_ctx *ep_ctx;
1400 4 : u32 added_ctxs;
1401 4 : dma_addr_t addr;
1402 4 :
1403 12 : xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1404 4 : xhci->devs[slot_id]->out_ctx, ep_index);
1405 8 : in_ctx = xhci->devs[slot_id]->in_ctx;
1406 12 : ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1407 12 : addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
1408 4 : deq_state->new_deq_ptr);
1409 8 : if (addr == 0) {
1410 32 : xhci_warn(xhci, "WARN Cannot submit config ep after "
1411 : "reset ep command\n");
1412 32 : xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
1413 : deq_state->new_deq_seg,
1414 : deq_state->new_deq_ptr);
1415 4 : return;
1416 : }
1417 8 : ep_ctx->deq = addr | deq_state->new_cycle_state;
1418 :
1419 8 : added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
1420 8 : xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
1421 : xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
1422 4 : }
1423 :
1424 : void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1425 : struct usb_device *udev, unsigned int ep_index)
1426 4 : {
1427 4 : struct xhci_dequeue_state deq_state;
1428 : struct xhci_virt_ep *ep;
1429 :
1430 : xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
1431 4 : ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1432 : /* We need to move the HW's dequeue pointer past this TD,
1433 : * or it will attempt to resend it on the next doorbell ring.
1434 : */
1435 20 : xhci_find_new_dequeue_state(xhci, udev->slot_id,
1436 : ep_index, ep->stopped_td,
1437 : &deq_state);
1438 :
1439 : /* HW with the reset endpoint quirk will use the saved dequeue state to
1440 : * issue a configure endpoint command later.
1441 : */
1442 8 : if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
1443 : xhci_dbg(xhci, "Queueing new dequeue state\n");
1444 12 : xhci_queue_new_dequeue_state(xhci, udev->slot_id,
1445 : ep_index, &deq_state);
1446 : } else {
1447 : /* Better hope no one uses the input context between now and the
1448 : * reset endpoint completion!
1449 : */
1450 : xhci_dbg(xhci, "Setting up input context for "
1451 : "configure endpoint command\n");
1452 16 : xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
1453 : ep_index, &deq_state);
1454 : }
1455 8 : }
1456 :
1457 : /* Deal with stalled endpoints. The core should have sent the control message
1458 : * to clear the halt condition. However, we need to make the xHCI hardware
1459 : * reset its sequence number, since a device will expect a sequence number of
1460 : * zero after the halt condition is cleared.
1461 : * Context: in_interrupt
1462 : */
1463 : void xhci_endpoint_reset(struct usb_hcd *hcd,
1464 : struct usb_host_endpoint *ep)
1465 1 : {
1466 1 : struct xhci_hcd *xhci;
1467 1 : struct usb_device *udev;
1468 1 : unsigned int ep_index;
1469 1 : unsigned long flags;
1470 1 : int ret;
1471 1 : struct xhci_virt_ep *virt_ep;
1472 1 :
1473 3 : xhci = hcd_to_xhci(hcd);
1474 3 : udev = (struct usb_device *) ep->hcpriv;
1475 1 : /* Called with a root hub endpoint (or an endpoint that wasn't added
1476 1 : * with xhci_add_endpoint())
1477 : */
1478 3 : if (!ep->hcpriv)
1479 1 : return;
1480 3 : ep_index = xhci_get_endpoint_index(&ep->desc);
1481 1 : virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1482 3 : if (!virt_ep->stopped_td) {
1483 : xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
1484 : ep->desc.bEndpointAddress);
1485 1 : return;
1486 : }
1487 4 : if (usb_endpoint_xfer_control(&ep->desc)) {
1488 : xhci_dbg(xhci, "Control endpoint stall already handled.\n");
1489 1 : return;
1490 : }
1491 :
1492 : xhci_dbg(xhci, "Queueing reset endpoint command\n");
1493 3 : spin_lock_irqsave(&xhci->lock, flags);
1494 2 : ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
1495 : /*
1496 : * Can't change the ring dequeue pointer until it's transitioned to the
1497 : * stopped state, which is only upon a successful reset endpoint
1498 : * command. Better hope that last command worked!
1499 : */
1500 2 : if (!ret) {
1501 3 : xhci_cleanup_stalled_ring(xhci, udev, ep_index);
1502 2 : kfree(virt_ep->stopped_td);
1503 2 : xhci_ring_cmd_db(xhci);
1504 : }
1505 2 : virt_ep->stopped_td = NULL;
1506 2 : virt_ep->stopped_trb = NULL;
1507 4 : spin_unlock_irqrestore(&xhci->lock, flags);
1508 :
1509 2 : if (ret)
1510 8 : xhci_warn(xhci, "FIXME allocate a new ring segment\n");
1511 2 : }
1512 :
1513 : /*
1514 : * At this point, the struct usb_device is about to go away, the device has
1515 : * disconnected, and all traffic has been stopped and the endpoints have been
1516 : * disabled. Free any HC data structures associated with that device.
1517 : */
1518 : void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
1519 : {
1520 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1521 1 : struct xhci_virt_device *virt_dev;
1522 1 : unsigned long flags;
1523 1 : u32 state;
1524 1 : int i;
1525 1 :
1526 3 : if (udev->slot_id == 0)
1527 2 : return;
1528 1 : virt_dev = xhci->devs[udev->slot_id];
1529 2 : if (!virt_dev)
1530 1 : return;
1531 :
1532 : /* Stop any wayward timer functions (which may grab the lock) */
1533 5 : for (i = 0; i < 31; ++i) {
1534 2 : virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
1535 4 : del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
1536 : }
1537 :
1538 3 : spin_lock_irqsave(&xhci->lock, flags);
1539 : /* Don't disable the slot if the host controller is dead. */
1540 2 : state = xhci_readl(xhci, &xhci->op_regs->status);
1541 5 : if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
1542 6 : xhci_free_virt_device(xhci, udev->slot_id);
1543 2 : spin_unlock_irqrestore(&xhci->lock, flags);
1544 1 : return;
1545 : }
1546 :
1547 5 : if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
1548 2 : spin_unlock_irqrestore(&xhci->lock, flags);
1549 : xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1550 1 : return;
1551 : }
1552 2 : xhci_ring_cmd_db(xhci);
1553 2 : spin_unlock_irqrestore(&xhci->lock, flags);
1554 1 : /*
1555 : * Event command completion handler will free any data structures
1556 : * associated with the slot.  XXX Can the free path sleep?
1557 : */
1558 : }
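/*
 * A minimal sketch (not part of the driver) of why xhci_free_dev() kills
 * each stop_cmd_timer before taking xhci->lock: del_timer_sync() waits
 * for a running timer handler to return, and a handler that itself takes
 * xhci->lock would deadlock against a caller already holding it.
 */
#if 0	/* illustrative only; hypothetical handler name */
static void stop_cmd_timer_fn(unsigned long arg)
{
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	unsigned long flags;

	spin_lock_irqsave(&xhci->lock, flags);	/* handler grabs the lock */
	/* clean up the endpoint whose Stop Endpoint command never completed */
	spin_unlock_irqrestore(&xhci->lock, flags);
}
#endif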
1559 :
1560 : /*
1561 : * Returns 0 if the xHC ran out of device slots, the Enable Slot command
1562 : * timed out, or allocating memory failed. Returns 1 on success.
1563 : */
1564 : int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
1565 : {
1566 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1567 1 : unsigned long flags;
1568 1 : int timeleft;
1569 1 : int ret;
1570 1 :
1571 4 : spin_lock_irqsave(&xhci->lock, flags);
1572 3 : ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
1573 3 : if (ret) {
1574 3 : spin_unlock_irqrestore(&xhci->lock, flags);
1575 1 : xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1576 2 : return 0;
1577 1 : }
1578 3 : xhci_ring_cmd_db(xhci);
1579 3 : spin_unlock_irqrestore(&xhci->lock, flags);
1580 1 :
1581 1 : /* XXX: how much time for xHC slot assignment? */
1582 3 : timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
1583 1 : USB_CTRL_SET_TIMEOUT);
1584 3 : if (timeleft <= 0) {
1585 14 : xhci_warn(xhci, "%s while waiting for a slot\n",
1586 1 : timeleft == 0 ? "Timeout" : "Signal");
1587 1 : /* FIXME cancel the enable slot request */
1588 1 : return 0;
1589 : }
1590 :
1591 2 : if (!xhci->slot_id) {
1592 8 : xhci_err(xhci, "Error while assigning device slot ID\n");
1593 1 : return 0;
1594 : }
1595 : /* xhci_alloc_virt_device() does not touch rings; no need to lock */
1596 7 : if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
1597 : /* Disable slot, if we can do it without mem alloc */
1598 8 : xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
1599 3 : spin_lock_irqsave(&xhci->lock, flags);
1600 5 : if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
1601 2 : xhci_ring_cmd_db(xhci);
1602 4 : spin_unlock_irqrestore(&xhci->lock, flags);
1603 1 : return 0;
1604 : }
1605 1 : udev->slot_id = xhci->slot_id;
1606 : /* Is this a LS or FS device under a HS hub? */
1607 : /* Hub or peripheral? */
1608 1 : return 1;
1609 : }
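/*
 * A minimal sketch (not part of the driver) of the completion-wait idiom
 * used in xhci_alloc_dev() and xhci_address_device():
 * wait_for_completion_interruptible_timeout() returns 0 on timeout,
 * -ERESTARTSYS when interrupted by a signal, and the remaining jiffies
 * (> 0) on success, so a single "timeleft <= 0" test covers both failure
 * cases.
 */
#if 0	/* illustrative only; hypothetical helper */
static int xhci_wait_cmd(struct completion *done, unsigned long timeout)
{
	long left = wait_for_completion_interruptible_timeout(done, timeout);

	if (left == 0)
		return -ETIMEDOUT;	/* hardware never completed the command */
	if (left < 0)
		return left;		/* -ERESTARTSYS: a signal is pending */
	return 0;			/* command completed in time */
}
#endif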
1610 :
1611 : /*
1612 : * Issue an Address Device command (which will issue a SetAddress request to
1613 : * the device).
1614 : * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
1615 : * we should only issue and wait on one address command at a time.
1616 : *
1617 : * We add one to the device address issued by the hardware because the USB core
1618 : * uses address 1 for the root hubs (even though they're not really devices).
1619 : */
1620 : int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
1621 : {
1622 1 : unsigned long flags;
1623 1 : int timeleft;
1624 1 : struct xhci_virt_device *virt_dev;
1625 2 : int ret = 0;
1626 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1627 1 : struct xhci_slot_ctx *slot_ctx;
1628 1 : struct xhci_input_control_ctx *ctrl_ctx;
1629 1 : u64 temp_64;
1630 1 :
1631 3 : if (!udev->slot_id) {
1632 1 : xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
1633 2 : return -EINVAL;
1634 1 : }
1635 1 :
1636 2 : virt_dev = xhci->devs[udev->slot_id];
1637 1 :
1638 1 : /* If this is a Set Address to an unconfigured device, setup ep 0 */
1639 4 : if (!udev->config)
1640 5 : xhci_setup_addressable_virt_dev(xhci, udev);
1641 1 : /* Otherwise, assume the core has the device configured how it wants */
1642 1 : xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
1643 5 : xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
1644 1 :
1645 4 : spin_lock_irqsave(&xhci->lock, flags);
1646 4 : ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
1647 : udev->slot_id);
1648 2 : if (ret) {
1649 2 : spin_unlock_irqrestore(&xhci->lock, flags);
1650 : xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
1651 1 : return ret;
1652 : }
1653 2 : xhci_ring_cmd_db(xhci);
1654 2 : spin_unlock_irqrestore(&xhci->lock, flags);
1655 :
1656 : /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
1657 2 : timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
1658 : USB_CTRL_SET_TIMEOUT);
1659 : /* FIXME: From xHCI spec section 4.3.4: "Software shall be responsible
1660 : * for timing the SetAddress() 'recovery interval' required by USB and
1661 : * aborting the command on a timeout."
1662 : */
1663 2 : if (timeleft <= 0) {
1664 13 : xhci_warn(xhci, "%s while waiting for address device command\n",
1665 : timeleft == 0 ? "Timeout" : "Signal");
1666 : /* FIXME cancel the address device command */
1667 1 : return -ETIME;
1668 : }
1669 :
1670 : switch (virt_dev->cmd_status) {
1671 3 : case COMP_CTX_STATE:
1672 3 : case COMP_EBADSLT:
1673 8 : xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
1674 : udev->slot_id);
1675 1 : ret = -EINVAL;
1676 1 : break;
1677 4 : case COMP_TX_ERR:
1678 4 : dev_warn(&udev->dev, "Device not responding to set address.\n");
1679 1 : ret = -EPROTO;
1680 1 : break;
1681 4 : case COMP_SUCCESS:
1682 : xhci_dbg(xhci, "Successful Address Device command\n");
1683 1 : break;
1684 2 : default:
1685 9 : xhci_err(xhci, "ERROR: unexpected command completion "
1686 1 : "code 0x%x.\n", virt_dev->cmd_status);
1687 : xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
1688 2 : xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
1689 1 : ret = -EINVAL;
1690 1 : break;
1691 : }
1692 8 : if (ret)
1693 4 : return ret;
1695 8 : temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
1696 : xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
1697 : xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
1698 : udev->slot_id,
1699 : &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
1700 : (unsigned long long)
1701 : xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
1702 : xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
1703 : (unsigned long long)virt_dev->out_ctx->dma);
1704 : xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
1705 2 : xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
1706 : xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
1707 2 : xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
1708 : /*
1709 : * USB core uses address 1 for the roothubs, so we add one to the
1710 : * address given back to us by the HC.
1711 : */
1712 2 : slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
1713 1 : udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
1714 : /* Zero the input context control for later use */
1715 2 : ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1716 1 : ctrl_ctx->add_flags = 0;
1717 1 : ctrl_ctx->drop_flags = 0;
1718 :
1719 : xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
1720 : /* XXX Meh, not sure if anyone else but choose_address uses this. */
1721 3 : set_bit(udev->devnum, udev->bus->devmap.devicemap);
1722 :
1723 1 : return 0;
1724 : }
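/*
 * A short sketch (not part of the driver) of the input control context
 * flags zeroed at the end of xhci_address_device(): per the xHCI spec,
 * bit 0 of add_flags/drop_flags selects the slot context and bit
 * (ep_index + 1) selects endpoint ep_index.  A later Configure Endpoint
 * that adds EP 2 IN (ep_index 4) while keeping the slot context would
 * set, roughly:
 */
#if 0	/* illustrative only */
	ctrl_ctx->add_flags = SLOT_FLAG | (1 << (4 + 1));
	ctrl_ctx->drop_flags = 0;
#endif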
1725 :
1726 : /* Once a hub descriptor is fetched for a device, we need to update the xHC's
1727 : * internal data structures for the device.
1728 : */
1729 : int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
1730 : struct usb_tt *tt, gfp_t mem_flags)
1731 : {
1732 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1733 1 : struct xhci_virt_device *vdev;
1734 1 : struct xhci_command *config_cmd;
1735 1 : struct xhci_input_control_ctx *ctrl_ctx;
1736 1 : struct xhci_slot_ctx *slot_ctx;
1737 1 : unsigned long flags;
1738 1 : unsigned think_time;
1739 1 : int ret;
1740 1 :
1741 1 : /* Ignore root hubs */
1742 4 : if (!hdev->parent)
1743 2 : return 0;
1744 1 :
1745 2 : vdev = xhci->devs[hdev->slot_id];
1746 2 : if (!vdev) {
1747 8 : xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
1748 1 : return -EINVAL;
1749 : }
1750 5 : config_cmd = xhci_alloc_command(xhci, true, mem_flags);
1751 2 : if (!config_cmd) {
1752 : xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
1753 1 : return -ENOMEM;
1754 : }
1755 :
1756 3 : spin_lock_irqsave(&xhci->lock, flags);
1757 2 : xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
1758 2 : ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
1759 1 : ctrl_ctx->add_flags |= SLOT_FLAG;
1760 2 : slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
1761 1 : slot_ctx->dev_info |= DEV_HUB;
1762 2 : if (tt->multi)
1763 1 : slot_ctx->dev_info |= DEV_MTT;
1764 3 : if (xhci->hci_version > 0x95) {
1765 : xhci_dbg(xhci, "xHCI version %x needs hub "
1766 : "TT think time and number of ports\n",
1767 : (unsigned int) xhci->hci_version);
1768 1 : slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
1769 : /* Set TT think time - convert from ns to FS bit times.
1770 : * 0 = 8 FS bit times, 1 = 16 FS bit times,
1771 : * 2 = 24 FS bit times, 3 = 32 FS bit times.
1772 : */
1773 1 : think_time = tt->think_time;
1774 2 : if (think_time != 0)
1775 1 : think_time = (think_time / 666) - 1;
1776 1 : slot_ctx->tt_info |= TT_THINK_TIME(think_time);
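		/* Worked example of the conversion above, assuming the USB
		 * core stores tt->think_time as 666 * (n + 1) ns for the
		 * hub descriptor's 2-bit think-time value n:
		 *   666 ns -> 0, 1332 ns -> 1, 1998 ns -> 2, 2664 ns -> 3
		 */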
1777 : } else {
1778 : xhci_dbg(xhci, "xHCI version %x doesn't need hub "
1779 : "TT think time or number of ports\n",
1780 : (unsigned int) xhci->hci_version);
1781 : }
1782 1 : slot_ctx->dev_state = 0;
1783 2 : spin_unlock_irqrestore(&xhci->lock, flags);
1784 :
1785 : xhci_dbg(xhci, "Set up %s for hub device.\n",
1786 : (xhci->hci_version > 0x95) ?
1787 : "configure endpoint" : "evaluate context");
1788 : xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
1789 2 : xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
1790 :
1791 : /* Issue and wait for the configure endpoint or
1792 : * evaluate context command.
1793 : */
1794 3 : if (xhci->hci_version > 0x95)
1795 5 : ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
1796 : false, false);
1797 : else
1798 5 : ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
1799 : true, false);
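	/* (Assuming this driver's xhci_configure_endpoint(xhci, udev, cmd,
	 * ctx_change, must_succeed) signature: ctx_change == false issues a
	 * Configure Endpoint command, ctx_change == true an Evaluate Context
	 * command, matching the version check above.)
	 */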
1800 :
1801 : xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
1802 4 : xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
1803 :
1804 2 : xhci_free_command(xhci, config_cmd);
1805 1 : return ret;
1806 : }
1807 :
1808 : int xhci_get_frame(struct usb_hcd *hcd)
1809 : {
1810 4 : struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1811 1 : /* EHCI takes this modulo the periodic schedule size. Why? */
1812 4 : return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
1813 : }
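/*
 * Note on the shift in xhci_get_frame(): MFINDEX counts 125 us
 * microframes and a 1 ms frame holds 8 of them, so ">> 3" converts the
 * microframe index into the current frame number.
 */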
1814 :
1815 : MODULE_DESCRIPTION(DRIVER_DESC);
1816 : MODULE_AUTHOR(DRIVER_AUTHOR);
1817 : MODULE_LICENSE("GPL");
1818 :
1819 : static int __init xhci_hcd_init(void)
1820 : {
1821 1 : #ifdef CONFIG_PCI
1822 1 : int retval = 0;
1823 :
1824 2 : retval = xhci_register_pci();
1825 :
1826 2 : if (retval < 0) {
1827 1 : printk(KERN_DEBUG "Problem registering PCI driver.\n");
1828 1 : return retval;
1829 : }
1830 : #endif
1831 : /*
1832 : * Check the compiler generated sizes of structures that must be laid
1833 : * out in specific ways for hardware access.
1834 : */
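	/* (The N*32/8 expressions below read as "N 32-bit words, in bytes";
	 * BUILD_BUG_ON() turns any size mismatch into a compile-time error.)
	 */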
1835 : BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
1836 : BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
1837 : BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
1838 : /* xhci_device_control has eight fields, and also
1839 : * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
1840 : */
1841 : BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
1842 : BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
1843 : BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
1844 : BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
1845 : BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
1846 : /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
1847 : BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
1849 1 : return 0;
1850 : }
1851 : module_init(xhci_hcd_init);
1852 :
1853 : static void __exit xhci_hcd_cleanup(void)
1854 : {
1855 : #ifdef CONFIG_PCI
1856 4 : xhci_unregister_pci();
1857 2 : #endif
1858 : }
1859 : module_exit(xhci_hcd_cleanup);
|