/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/dmapool.h>

#include "xhci.h"

/*
 * Allocates a generic ring segment from the ring pool, sets the dma address,
 * initializes the segment to zero, and sets the private next pointer to NULL.
 *
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_segment *seg;
	dma_addr_t dma;

	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
		return NULL;
	xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);

	seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
	if (!seg->trbs) {
		kfree(seg);
		return NULL;
	}
	xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
			seg->trbs, (unsigned long long)dma);

	memset(seg->trbs, 0, SEGMENT_SIZE);
	seg->dma = dma;
	seg->next = NULL;

	return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
	if (!seg)
		return;
	if (seg->trbs) {
		xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
				seg->trbs, (unsigned long long)seg->dma);
		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
		seg->trbs = NULL;
	}
	xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
	kfree(seg);
}

/*
 * Make the prev segment point to the next segment.
 *
 * Change the last TRB in the prev segment to be a Link TRB which points to the
 * DMA address of the next segment.  The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs)
{
	u32 val;

	if (!prev || !next)
		return;
	prev->next = next;
	if (link_trbs) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;

		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
		val &= ~TRB_TYPE_BITMASK;
		val |= TRB_TYPE(TRB_LINK);
		/* Always set the chain bit with 0.95 hardware */
		if (xhci_link_trb_quirk(xhci))
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
	}
	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
			(unsigned long long)prev->dma,
			(unsigned long long)next->dma);
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_segment *seg;
	struct xhci_segment *first_seg;

	if (!ring)
		return;
	if (ring->first_seg) {
		first_seg = ring->first_seg;
		seg = first_seg->next;
		xhci_dbg(xhci, "Freeing ring at %p\n", ring);
		while (seg != first_seg) {
			struct xhci_segment *next = seg->next;
			xhci_segment_free(xhci, seg);
			seg = next;
		}
		xhci_segment_free(xhci, first_seg);
		ring->first_seg = NULL;
	}
	kfree(ring);
}

static void xhci_initialize_ring_info(struct xhci_ring *ring)
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enq_seg = ring->first_seg;
	ring->dequeue = ring->enqueue;
	ring->deq_seg = ring->first_seg;
	/* The ring is initialized to 0.  The producer must write 1 to the
	 * cycle bit to hand over ownership of the TRB, so PCS = 1.  The
	 * consumer must compare CCS to the cycle bit to check ownership,
	 * so CCS = 1.
	 */
	ring->cycle_state = 1;
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->deq_updates = 0;
}
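
/*
 * Illustrative sketch of the cycle bit handshake (not a definitive
 * reference): with cycle_state = 1 on a freshly initialized ring, the
 * producer hands a TRB over by writing 1 to its cycle bit, and a consumer
 * such as the event handler stops dequeuing once the cycle bit no longer
 * matches its CCS, roughly:
 *
 *	if ((trb->event_cmd.flags & TRB_CYCLE) != ring->cycle_state)
 *		break;
 *
 * i.e. the TRB is still owned by the producer.  Both sides toggle their
 * cycle state when they pass a link TRB that has the Toggle Cycle
 * (LINK_TOGGLE) flag set.
 */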

/*
 * Create a new ring with zero or more segments.
 *
 * Link each segment together into a ring.
 * Set the end flag and the cycle toggle bit on the last segment.
 * See section 4.9.1 and figures 15 and 16.
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *prev;

	ring = kzalloc(sizeof *(ring), flags);
	xhci_dbg(xhci, "Allocating ring at %p\n", ring);
	if (!ring)
		return NULL;

	INIT_LIST_HEAD(&ring->td_list);
	if (num_segs == 0)
		return ring;

	ring->first_seg = xhci_segment_alloc(xhci, flags);
	if (!ring->first_seg)
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment *next;

		next = xhci_segment_alloc(xhci, flags);
		if (!next)
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
		xhci_dbg(xhci, "Wrote link toggle flag to"
				" segment %p (virtual), 0x%llx (DMA)\n",
				prev, (unsigned long long)prev->dma);
	}
	xhci_initialize_ring_info(ring);
	return ring;

fail:
	xhci_ring_free(xhci, ring);
	return NULL;
}

/* Zero an endpoint ring (except for link TRBs) and move the enqueue and
 * dequeue pointers to the beginning of the ring.
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, true);
		seg = seg->next;
	} while (seg != ring->first_seg);
	xhci_initialize_ring_info(ring);
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 */
	INIT_LIST_HEAD(&ring->td_list);
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	if (!ctx->bytes) {
		kfree(ctx);
		return NULL;
	}
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}
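
/*
 * Sizing note, with a worked example: a device context is one slot context
 * plus 31 endpoint contexts, i.e. 32 entries of CTX_SIZE bytes each, which
 * is 32 * 32 = 1024 bytes with 32-byte contexts and 32 * 64 = 2048 bytes
 * with 64-byte contexts.  An input container carries one extra entry (the
 * input control context), hence the additional CTX_SIZE bytes added above.
 */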

void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx,
		unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
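
/*
 * Worked example of the index math above: in an output (device) context,
 * endpoint 0 sits at offset 1 * CTX_SIZE, right after the slot context, so
 * ep_index 0 is incremented once.  In an input context the input control
 * context occupies the first entry, shifting everything down by one more,
 * so ep_index 0 resolves to offset 2 * CTX_SIZE.
 */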

static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	init_timer(&ep->stop_cmd_timer);
	ep->stop_cmd_timer.data = (unsigned long) ep;
	ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
	ep->xhci = xhci;
}

/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
	if (!dev)
		return;

	for (i = 0; i < 31; ++i)
		if (dev->eps[i].ring)
			xhci_ring_free(xhci, dev->eps[i].ring);

	if (dev->ring_cache) {
		for (i = 0; i < dev->num_rings_cached; i++)
			xhci_ring_free(xhci, dev->ring_cache[i]);
		kfree(dev->ring_cache);
	}

	if (dev->in_ctx)
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = NULL;
}

int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dev->in_ctx->dma);

	/* Initialize the cancellation list and watchdog timers for each ep */
	for (i = 0; i < 31; i++) {
		xhci_init_endpoint_timer(xhci, &dev->eps[i]);
		INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
	}

	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->eps[0].ring)
		goto fail;

	/* Allocate pointers to the ring cache */
	dev->ring_cache = kzalloc(
			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
			flags);
	if (!dev->ring_cache)
		goto fail;
	dev->num_rings_cached = 0;

	init_completion(&dev->cmd_completion);
	INIT_LIST_HEAD(&dev->cmd_list);

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);

	return 1;
fail:
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep0_ctx;
	struct usb_device *top_dev;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid */
	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	slot_ctx->dev_info |= LAST_CTX(1);

	slot_ctx->dev_info |= (u32) udev->route;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		slot_ctx->tt_info = udev->tt->hub->slot_id;
		slot_ctx->tt_info |= udev->ttport << 8;
		if (udev->tt->multi)
			slot_ctx->dev_info |= DEV_MTT;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * XXX: Not sure about wireless USB devices.
	 */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
		break;
	case USB_SPEED_HIGH:
	/* USB core guesses at a 64-byte max packet first for FS devices */
	case USB_SPEED_FULL:
		ep0_ctx->ep_info2 |= MAX_PACKET(64);
		break;
	case USB_SPEED_LOW:
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
	default:
		/* New speed? */
		BUG();
	}
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	ep0_ctx->deq = dev->eps[0].ring->first_seg->dma;
	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

/*
 * Convert interval expressed as 2^(bInterval - 1) == interval into
 * straight exponent value 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
	if (interval != ep->desc.bInterval - 1)
		dev_warn(&udev->dev,
				"ep %#x - rounding interval to %d %sframes\n",
				ep->desc.bEndpointAddress,
				1 << interval,
				udev->speed == USB_SPEED_FULL ? "" : "micro");

	if (udev->speed == USB_SPEED_FULL) {
		/*
		 * Full speed isoc endpoints specify interval in frames,
		 * not microframes.  We are using microframes everywhere,
		 * so adjust accordingly.
		 */
		interval += 3;	/* 1 frame = 2^3 uframes */
	}

	return interval;
}
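
/*
 * Worked examples for the decoding above (illustrative values): a high
 * speed interrupt endpoint with bInterval = 4 yields interval =
 * clamp(4, 1, 16) - 1 = 3, i.e. a service period of 2^3 * 125us = 1ms.
 * A full speed isochronous endpoint with bInterval = 1 (one frame) yields
 * interval = 0 + 3 = 3, since one 1ms frame equals 2^3 microframes.
 */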

/*
 * Convert bInterval expressed in frames (in 1-255 range) to exponent of
 * microframes, rounded down to nearest power of 2.
 */
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval;

	interval = fls(8 * ep->desc.bInterval) - 1;
	interval = clamp_val(interval, 3, 10);
	if ((1 << interval) != 8 * ep->desc.bInterval)
		dev_warn(&udev->dev,
				"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
				ep->desc.bEndpointAddress,
				1 << interval,
				8 * ep->desc.bInterval);

	return interval;
}
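
/*
 * Worked example (illustrative value): bInterval = 7 frames gives
 * fls(8 * 7) - 1 = 5, and 2^5 = 32 microframes != 56, so the requested
 * 7 frame period is rounded down to 4 frames (32 microframes) and a
 * warning is printed.
 */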

/* Return the polling or NAK interval.
 *
 * The polling interval is expressed in "microframes".  If xHCI's Interval
 * field is set to N, it will service the endpoint every 2^(Interval)*125us.
 *
 * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if
 * interval is set to 0.
 */
static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	unsigned int interval = 0;

	switch (udev->speed) {
	case USB_SPEED_HIGH:
		/* Max NAK rate */
		if (usb_endpoint_xfer_control(&ep->desc) ||
				usb_endpoint_xfer_bulk(&ep->desc)) {
			interval = ep->desc.bInterval;
			break;
		}
		/* Fall through - SS and HS isoc/int have same decoding */

	case USB_SPEED_SUPER:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
		}
		break;

	case USB_SPEED_FULL:
		if (usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_exponent_interval(udev, ep);
			break;
		}
		/*
		 * Fall through for interrupt endpoint interval decoding
		 * since it uses the same rules as low speed interrupt
		 * endpoints.
		 */

	case USB_SPEED_LOW:
		if (usb_endpoint_xfer_int(&ep->desc) ||
				usb_endpoint_xfer_isoc(&ep->desc)) {
			interval = xhci_parse_frame_interval(udev, ep);
		}
		break;

	default:
		BUG();
	}
	return EP_INTERVAL(interval);
}

/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
 * High speed endpoint descriptors can define "the number of additional
 * transaction opportunities per microframe", but that goes in the Max Burst
 * endpoint context field.
 */
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
		return 0;
	return ep->ss_ep_comp->desc.bmAttributes;
}

static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int in;
	u32 type;

	in = usb_endpoint_dir_in(&ep->desc);
	if (usb_endpoint_xfer_control(&ep->desc)) {
		type = EP_TYPE(CTRL_EP);
	} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
		if (in)
			type = EP_TYPE(BULK_IN_EP);
		else
			type = EP_TYPE(BULK_OUT_EP);
	} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
		if (in)
			type = EP_TYPE(ISOC_IN_EP);
		else
			type = EP_TYPE(ISOC_OUT_EP);
	} else if (usb_endpoint_xfer_int(&ep->desc)) {
		if (in)
			type = EP_TYPE(INT_IN_EP);
		else
			type = EP_TYPE(INT_OUT_EP);
	} else {
		BUG();
	}
	return type;
}

/* Return the maximum endpoint service interval time (ESIT) payload.
 * Basically, this is the maxpacket size, multiplied by the burst size
 * and mult size.
 */
static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	int max_burst;
	int max_packet;

	/* Only applies for interrupt or isochronous endpoints */
	if (usb_endpoint_xfer_control(&ep->desc) ||
			usb_endpoint_xfer_bulk(&ep->desc))
		return 0;

	if (udev->speed == USB_SPEED_SUPER) {
		if (ep->ss_ep_comp)
			return ep->ss_ep_comp->desc.wBytesPerInterval;
		xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
		/* Assume no bursts, no multiple opportunities to send. */
		return ep->desc.wMaxPacketSize;
	}

	/* Packet size is in bits 10:0 of wMaxPacketSize */
	max_packet = ep->desc.wMaxPacketSize & 0x7ff;
	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
	/* A 0 in max burst means 1 transfer per ESIT */
	return max_packet * (max_burst + 1);
}
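
/*
 * Worked example (illustrative value): a high speed interrupt endpoint
 * with wMaxPacketSize = 0x0840 encodes 64-byte packets in bits 10:0 and
 * one additional transaction opportunity per microframe in bits 12:11,
 * so the max ESIT payload is 64 * (1 + 1) = 128 bytes.
 */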

int xhci_endpoint_init(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *udev,
		struct usb_host_endpoint *ep,
		gfp_t mem_flags)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_burst;
	u32 max_esit_payload;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	virt_dev->eps[ep_index].new_ring =
		xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
			return -ENOMEM;
		virt_dev->eps[ep_index].new_ring =
			virt_dev->ring_cache[virt_dev->num_rings_cached];
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		virt_dev->num_rings_cached--;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
	}
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;

	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));

	/* FIXME dig Mult and streams info out of ep companion desc */

	/* Allow 3 retries for everything but isoc;
	 * error count = 0 means infinite retries.
	 */
	if (!usb_endpoint_xfer_isoc(&ep->desc))
		ep_ctx->ep_info2 = ERROR_COUNT(3);
	else
		ep_ctx->ep_info2 = ERROR_COUNT(1);

	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);

	/* Set the max packet size and max burst */
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		max_packet = ep->desc.wMaxPacketSize;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		/* dig out max burst from ep companion desc */
		if (!ep->ss_ep_comp) {
			xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
			max_packet = 0;
		} else {
			max_packet = ep->ss_ep_comp->desc.bMaxBurst;
		}
		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
		break;
	case USB_SPEED_HIGH:
		/* bits 12:11 specify the number of additional transaction
		 * opportunities per microframe (USB 2.0, section 9.6.6)
		 */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
				usb_endpoint_xfer_int(&ep->desc)) {
			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
		}
		/* Fall through */
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		/* Packet size is in bits 10:0 of wMaxPacketSize */
		max_packet = ep->desc.wMaxPacketSize & 0x7ff;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
		break;
	default:
		BUG();
	}
	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);

	/*
	 * XXX no idea how to calculate the average TRB buffer length for bulk
	 * endpoints, as the driver gives us no clue how big each scatter gather
	 * list entry (or buffer) is going to be.
	 *
	 * For isochronous and interrupt endpoints, we set it to the max
	 * available, until we have new API in the USB core to allow drivers to
	 * declare how much bandwidth they actually need.
	 *
	 * Normally, it would be calculated by taking the total of the buffer
	 * lengths in the TD and then dividing by the number of TRBs in a TD,
	 * including link TRBs, No-op TRBs, and Event data TRBs.  Since we don't
	 * use Event Data TRBs, and we don't chain in a link TRB on short
	 * transfers, we're basically dividing by 1.
	 */
	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);

	/* FIXME Debug endpoint context */
	return 0;
}

void xhci_endpoint_zero(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_host_endpoint *ep)
{
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
	ep_ctx->deq = 0;
	ep_ctx->tx_info = 0;
	/* Don't free the endpoint ring until the set interface or configuration
	 * request succeeds.
	 */
}

/* Copy an output endpoint context to the matching input endpoint context.
 * Useful when you want to change one particular aspect of the endpoint and
 * then issue a configure endpoint command.
 */
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index)
{
	struct xhci_ep_ctx *out_ep_ctx;
	struct xhci_ep_ctx *in_ep_ctx;

	out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	in_ep_ctx->ep_info = out_ep_ctx->ep_info;
	in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
	in_ep_ctx->deq = out_ep_ctx->deq;
	in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}

/* Copy the output slot context to the input slot context.
 * Useful when you want to change one particular aspect of the device and
 * then issue a configure endpoint command.  Only the context entries field
 * matters, but we'll copy the whole thing anyway.
 */
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx)
{
	struct xhci_slot_ctx *in_slot_ctx;
	struct xhci_slot_ctx *out_slot_ctx;

	in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);

	in_slot_ctx->dev_info = out_slot_ctx->dev_info;
	in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
	in_slot_ctx->tt_info = out_slot_ctx->tt_info;
	in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}

/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
	int i;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);

	if (!num_sp)
		return 0;

	xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
	if (!xhci->scratchpad)
		goto fail_sp;

	xhci->scratchpad->sp_array =
		pci_alloc_consistent(to_pci_dev(dev),
				num_sp * sizeof(u64),
				&xhci->scratchpad->sp_dma);
	if (!xhci->scratchpad->sp_array)
		goto fail_sp2;

	xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
	if (!xhci->scratchpad->sp_buffers)
		goto fail_sp3;

	xhci->scratchpad->sp_dma_buffers =
		kzalloc(sizeof(dma_addr_t) * num_sp, flags);

	if (!xhci->scratchpad->sp_dma_buffers)
		goto fail_sp4;

	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
	for (i = 0; i < num_sp; i++) {
		dma_addr_t dma;
		void *buf = pci_alloc_consistent(to_pci_dev(dev),
				xhci->page_size, &dma);
		if (!buf)
			goto fail_sp5;

		xhci->scratchpad->sp_array[i] = dma;
		xhci->scratchpad->sp_buffers[i] = buf;
		xhci->scratchpad->sp_dma_buffers[i] = dma;
	}

	return 0;

fail_sp5:
	for (i = i - 1; i >= 0; i--) {
		pci_free_consistent(to_pci_dev(dev), xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);

fail_sp4:
	kfree(xhci->scratchpad->sp_buffers);

fail_sp3:
	pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);

fail_sp2:
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;

fail_sp:
	return -ENOMEM;
}

static void scratchpad_free(struct xhci_hcd *xhci)
{
	int num_sp;
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	if (!xhci->scratchpad)
		return;

	num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);

	for (i = 0; i < num_sp; i++) {
		pci_free_consistent(pdev, xhci->page_size,
				xhci->scratchpad->sp_buffers[i],
				xhci->scratchpad->sp_dma_buffers[i]);
	}
	kfree(xhci->scratchpad->sp_dma_buffers);
	kfree(xhci->scratchpad->sp_buffers);
	pci_free_consistent(pdev, num_sp * sizeof(u64),
			xhci->scratchpad->sp_array,
			xhci->scratchpad->sp_dma);
	kfree(xhci->scratchpad);
	xhci->scratchpad = NULL;
}

struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags)
{
	struct xhci_command *command;

	command = kzalloc(sizeof(*command), mem_flags);
	if (!command)
		return NULL;

	command->in_ctx =
		xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
	if (!command->in_ctx) {
		kfree(command);
		return NULL;
	}

	if (allocate_completion) {
		command->completion =
			kzalloc(sizeof(struct completion), mem_flags);
		if (!command->completion) {
			xhci_free_container_ctx(xhci, command->in_ctx);
			kfree(command);
			return NULL;
		}
		init_completion(command->completion);
	}

	command->status = 0;
	INIT_LIST_HEAD(&command->cmd_list);
	return command;
}
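
/*
 * Typical pairing with xhci_free_command(), shown as a minimal sketch
 * (error paths, locking, and the actual command queuing are elided and
 * depend on the caller):
 *
 *	struct xhci_command *cmd;
 *
 *	cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
 *	if (!cmd)
 *		return -ENOMEM;
 *	... queue the command TRB and ring the host controller doorbell ...
 *	wait_for_completion(cmd->completion);
 *	xhci_free_command(xhci, cmd);
 */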

void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command)
{
	xhci_free_container_ctx(xhci, command->in_ctx);
	kfree(command->completion);
	kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	if (xhci->ir_set) {
		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
	}
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		pci_free_consistent(pdev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg(xhci, "Freed ERST\n");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg(xhci, "Freed event ring\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
	if (xhci->cmd_ring)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	scratchpad_free(xhci);
	xhci->page_size = 0;
	xhci->page_shift = 0;
}

static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
		struct xhci_segment *input_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t input_dma,
		struct xhci_segment *result_seg,
		char *test_name, int test_number)
{
	unsigned long long start_dma;
	unsigned long long end_dma;
	struct xhci_segment *seg;

	start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
	end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);

	seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
	if (seg != result_seg) {
		xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
				test_name, test_number);
		xhci_warn(xhci, "Tested TRB math w/ seg %p and "
				"input DMA 0x%llx\n",
				input_seg,
				(unsigned long long) input_dma);
		xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
				"ending TRB %p (0x%llx DMA)\n",
				start_trb, start_dma,
				end_trb, end_dma);
		xhci_warn(xhci, "Expected seg %p, got seg %p\n",
				result_seg, seg);
		return -1;
	}
	return 0;
}

/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
	struct {
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} simple_test_vector[] = {
		/* A zeroed DMA field should fail */
		{ 0, NULL },
		/* One TRB before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 16, NULL },
		/* One byte before the ring start should fail */
		{ xhci->event_ring->first_seg->dma - 1, NULL },
		/* Starting TRB should succeed */
		{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
		/* Ending TRB should succeed */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
			xhci->event_ring->first_seg },
		/* One byte after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
		/* One TRB after the ring end should fail */
		{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
		/* An address of all ones should fail */
		{ (dma_addr_t) (~0), NULL },
	};
	struct {
		struct xhci_segment *input_seg;
		union xhci_trb *start_trb;
		union xhci_trb *end_trb;
		dma_addr_t input_dma;
		struct xhci_segment *result_seg;
	} complex_test_vector[] = {
		/* Test feeding a valid DMA address from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->event_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* Test feeding a valid start and end TRB from a different ring */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = xhci->cmd_ring->first_seg->trbs,
			.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
			.input_dma = xhci->cmd_ring->first_seg->dma,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[0],
			.end_trb = &xhci->event_ring->first_seg->trbs[3],
			.input_dma = xhci->event_ring->first_seg->dma + 4*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[3],
			.end_trb = &xhci->event_ring->first_seg->trbs[6],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but after this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
		/* TRB in this ring, but before this wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
			.result_seg = NULL,
		},
		/* TRB not in this ring, and we have a wrapped TD */
		{ .input_seg = xhci->event_ring->first_seg,
			.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
			.end_trb = &xhci->event_ring->first_seg->trbs[1],
			.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
			.result_seg = NULL,
		},
	};

	unsigned int num_tests;
	int i, ret;

	num_tests = ARRAY_SIZE(simple_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				xhci->event_ring->first_seg,
				xhci->event_ring->first_seg->trbs,
				&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
				simple_test_vector[i].input_dma,
				simple_test_vector[i].result_seg,
				"Simple", i);
		if (ret < 0)
			return ret;
	}

	num_tests = ARRAY_SIZE(complex_test_vector);
	for (i = 0; i < num_tests; i++) {
		ret = xhci_test_trb_in_td(xhci,
				complex_test_vector[i].input_seg,
				complex_test_vector[i].start_trb,
				complex_test_vector[i].end_trb,
				complex_test_vector[i].input_dma,
				complex_test_vector[i].result_seg,
				"Complex", i);
		if (ret < 0)
			return ret;
	}
	xhci_dbg(xhci, "TRB math tests passed.\n");
	return 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t dma;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int val, val2;
	u64 val_64;
	struct xhci_segment *seg;
	u32 page_size;
	int i;

	page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
	xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
	for (i = 0; i < 16; i++) {
		if ((0x1 & page_size) != 0)
			break;
		page_size = page_size >> 1;
	}
	if (i < 16)
		xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
	else
		xhci_warn(xhci, "WARN: no supported page size\n");
	/* Use 4K pages, since that's common and the minimum the HC supports */
	xhci->page_shift = 12;
	xhci->page_size = 1 << xhci->page_shift;
	xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);

	/*
	 * Program the Number of Device Slots Enabled field in the CONFIG
	 * register with the max value of slots the HC can handle.
	 */
	val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
	xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
			(unsigned int) val);
	val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
	val |= (val2 & ~HCS_SLOTS_MASK);
	xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
			(unsigned int) val);
	xhci_writel(xhci, val, &xhci->op_regs->config_reg);

	/*
	 * Section 5.4.8 - doorbell array must be
	 * "physically contiguous and 64-byte (cache line) aligned".
	 */
	xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(*xhci->dcbaa), &dma);
	if (!xhci->dcbaa)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
			(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
	xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);

	/*
	 * Initialize the ring segment pool.  The ring must be a contiguous
	 * structure comprised of TRBs.  The TRBs must be 16 byte aligned,
	 * however, the command ring segment needs 64-byte aligned segments,
	 * so we pick the greater alignment need.
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Set up the command ring to have one segment for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags);
	if (!xhci->cmd_ring)
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
			(unsigned long long)xhci->cmd_ring->first_seg->dma);

	/* Set the address in the Command Ring Control register */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	val = xhci_readl(xhci, &xhci->cap_regs->db_off);
	val &= DBOFF_MASK;
	xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
			" from cap regs base addr\n", val);
	xhci->dba = (void *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = (void *) xhci->run_regs->ir_set;

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci, flags) < 0)
		goto fail;

	xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
	if (!xhci->erst.entries)
		goto fail;
	xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = seg->dma;
		entry->seg_size = TRBS_PER_SEGMENT;
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
			val);
	xhci_writel(xhci, val, &xhci->ir_set->erst_size);

	xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
	/* set the segment table base address */
	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = NULL;

	if (scratchpad_alloc(xhci, flags))
		goto fail;

	return 0;

fail:
	xhci_warn(xhci, "Couldn't initialize memory\n");
	xhci_mem_cleanup(xhci);
	return -ENOMEM;
}