/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame (struct usb_hcd *hcd);

/*-------------------------------------------------------------------------*/

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @ehci: host controller state
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	case Q_TYPE_QH:
		return &periodic->qh->qh_next;
	case Q_TYPE_FSTN:
		return &periodic->fstn->fstn_next;
	case Q_TYPE_ITD:
		return &periodic->itd->itd_next;
	// case Q_TYPE_SITD:
	default:
		return &periodic->sitd->sitd_next;
	}
}

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{
	switch (hc32_to_cpu(ehci, tag)) {
	/* our ehci_shadow.qh is actually software part */
	case Q_TYPE_QH:
		return &periodic->qh->hw->hw_next;
	/* others are hw parts */
	default:
		return periodic->hw_next;
	}
}

/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
	union ehci_shadow	*prev_p = &ehci->pshadow[frame];
	__hc32			*hw_p = &ehci->periodic[frame];
	union ehci_shadow	here = *prev_p;

	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(ehci, prev_p,
				Q_NEXT_TYPE(ehci, *hw_p));
		hw_p = shadow_next_periodic(ehci, &here,
				Q_NEXT_TYPE(ehci, *hw_p));
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;

	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(ehci, &here,
			Q_NEXT_TYPE(ehci, *hw_p));
	*hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p));
}

/* how many of the uframe's 125 usecs are allocated? */
static unsigned short
periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
{
	__hc32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned		usecs = 0;
	struct ehci_qh_hw	*hw;

	while (q->ptr) {
		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
		case Q_TYPE_QH:
			hw = q->qh->hw;
			/* is it in the S-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
				usecs += q->qh->usecs;
			/* ... or C-mask? */
			if (hw->hw_info2 & cpu_to_hc32(ehci,
					1 << (8 + uframe)))
				usecs += q->qh->c_usecs;
			hw_p = &hw->hw_next;
			q = &q->qh->qh_next;
			break;
		// case Q_TYPE_FSTN:
		default:
			/* for "save place" FSTNs, count the relevant INTR
			 * bandwidth from the previous frame
			 */
			if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) {
				ehci_dbg (ehci, "ignoring FSTN cost ...\n");
			}
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
			break;
		case Q_TYPE_ITD:
			if (q->itd->hw_transaction[uframe])
				usecs += q->itd->stream->usecs;
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			break;
		case Q_TYPE_SITD:
			/* is it in the S-mask?  (count SPLIT, DATA) */
			if (q->sitd->hw_uframe & cpu_to_hc32(ehci,
					1 << uframe)) {
				if (q->sitd->hw_fullspeed_ep &
						cpu_to_hc32(ehci, 1<<31))
					usecs += q->sitd->stream->usecs;
				else	/* worst case for OUT start-split */
					usecs += HS_USECS_ISO (188);
			}

			/* ... C-mask?  (count CSPLIT, DATA) */
			if (q->sitd->hw_uframe &
					cpu_to_hc32(ehci, 1 << (8 + uframe))) {
				/* worst case for IN complete-split */
				usecs += q->sitd->stream->c_usecs;
			}

			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			break;
		}
	}
#ifdef	DEBUG
	if (usecs > 100)
		ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, usecs);
#endif
	return usecs;
}

/*-------------------------------------------------------------------------*/

static int same_tt (struct usb_device *dev1, struct usb_device *dev2)
{
	if (!dev1->tt || !dev2->tt)
		return 0;
	if (dev1->tt != dev2->tt)
		return 0;
	if (dev1->tt->multi)
		return dev1->ttport == dev2->ttport;
	else
		return 1;
}
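
/* Background on the test above: a hub with a single TT shares that TT
 * among all its ports, so any two full/low speed devices below it contend
 * for the same downstream bus; a multi-TT hub provides one TT per port,
 * which is why same_tt() also has to compare ttport in the multi case.
 */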

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* Which uframe does the low/fullspeed transfer start in?
 *
 * The parameter is the mask of ssplits in "H-frame" terms
 * and this returns the transfer start uframe in "B-frame" terms,
 * which allows both to match, e.g. a ssplit in "H-frame" uframe 0
 * will cause a transfer in "B-frame" uframe 0.  "B-frames" lag
 * "H-frames" by 1 uframe.  See the EHCI spec sec 4.5 and figure 4.7.
 */
static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
{
	unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
	if (!smask) {
		ehci_err(ehci, "invalid empty smask!\n");
		/* uframe 7 can't have bw so this will indicate failure */
		return 7;
	}
	return ffs(smask) - 1;
}
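
/* Worked example: an smask of 0x04 (ssplit scheduled in H-frame uframe 2)
 * yields ffs(0x04) - 1 == 2, so the full/low speed transfer occupies the
 * TT's downstream bus starting in B-frame uframe 2.
 */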

static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{
	int i;
	for (i=0; i<7; i++) {
		if (max_tt_usecs[i] < tt_usecs[i]) {
			tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
			tt_usecs[i] = max_tt_usecs[i];
		}
	}
}
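
/* Worked example (illustrative values): starting from
 * tt_usecs = { 200, 0, 0, 0, 0, 0, 0, 0 }, carryover_tt_bandwidth()
 * clips uframe 0 to its 125 usec budget and pushes the remaining
 * 75 usecs into uframe 1, leaving { 125, 75, 0, 0, 0, 0, 0, 0 }.
 */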

/* How many of the tt's periodic downstream 1000 usecs are allocated?
 *
 * While this measures the bandwidth in terms of usecs/uframe,
 * the low/fullspeed bus has no notion of uframes, so any particular
 * low/fullspeed transfer can "carry over" from one uframe to the next,
 * since the TT just performs downstream transfers in sequence.
 *
 * For example two separate 100 usec transfers can start in the same uframe,
 * and the second one would "carry over" 75 usecs into the next uframe.
 */
static void
periodic_tt_usecs (
	struct ehci_hcd *ehci,
	struct usb_device *dev,
	unsigned frame,
	unsigned short tt_usecs[8]
)
{
	__hc32			*hw_p = &ehci->periodic [frame];
	union ehci_shadow	*q = &ehci->pshadow [frame];
	unsigned char		uf;

	memset(tt_usecs, 0, 16);

	while (q->ptr) {
		switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
		case Q_TYPE_ITD:
			hw_p = &q->itd->hw_next;
			q = &q->itd->itd_next;
			continue;
		case Q_TYPE_QH:
			if (same_tt(dev, q->qh->dev)) {
				uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
				tt_usecs[uf] += q->qh->tt_usecs;
			}
			hw_p = &q->qh->hw->hw_next;
			q = &q->qh->qh_next;
			continue;
		case Q_TYPE_SITD:
			if (same_tt(dev, q->sitd->urb->dev)) {
				uf = tt_start_uframe(ehci, q->sitd->hw_uframe);
				tt_usecs[uf] += q->sitd->stream->tt_usecs;
			}
			hw_p = &q->sitd->hw_next;
			q = &q->sitd->sitd_next;
			continue;
		// case Q_TYPE_FSTN:
		default:
			ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n",
					frame);
			hw_p = &q->fstn->hw_next;
			q = &q->fstn->fstn_next;
		}
	}

	carryover_tt_bandwidth(tt_usecs);

	if (max_tt_usecs[7] < tt_usecs[7])
		ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n",
			frame, tt_usecs[7] - max_tt_usecs[7]);
}

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	unsigned		uframe,
	u16			usecs
)
{
	if ((period == 0) || (uframe >= 7))	/* error */
		return 0;

	for (; frame < ehci->periodic_size; frame += period) {
		unsigned short tt_usecs[8];

		periodic_tt_usecs (ehci, dev, frame, tt_usecs);

		ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
			" schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
			frame, usecs, uframe,
			tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
			tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);

		if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
			ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
				frame, uframe);
			return 0;
		}

		/* special case for isoc transfers larger than 125us:
		 * the first and each subsequent fully used uframe
		 * must be empty, so as to not illegally delay
		 * already scheduled transactions
		 */
		if (125 < usecs) {
			int ufs = (usecs / 125);
			int i;
			for (i = uframe; i < (uframe + ufs) && i < 8; i++)
				if (0 < tt_usecs[i]) {
					ehci_vdbg(ehci,
						"multi-uframe xfer can't fit "
						"in frame %d uframe %d\n",
						frame, i);
					return 0;
				}
		}

		tt_usecs[uframe] += usecs;

		carryover_tt_bandwidth(tt_usecs);

		/* fail if the carryover pushed bw past the last uframe's limit */
		if (max_tt_usecs[7] < tt_usecs[7]) {
			ehci_vdbg(ehci,
				"tt unavailable usecs %d frame %d uframe %d\n",
				usecs, frame, uframe);
			return 0;
		}
	}

	return 1;
}

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision (
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage:  split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__hc32			type;
		struct ehci_qh_hw	*hw;

		here = ehci->pshadow [frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt (dev, here.qh->dev)) {
					u32		mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt (dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = hc32_to_cpu(ehci, here.sitd
							->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			// case Q_TYPE_FSTN:
			default:
				ehci_dbg (ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static int enable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	if (ehci->periodic_sched++)
		return 0;

	/* did clearing PSE take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
					     STS_PSS, 0, 9 * 125);
	if (status)
		return status;

	cmd = ehci_readl(ehci, &ehci->regs->command) | CMD_PSE;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	/* posted write ... PSS happens later */
	ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	ehci->next_uframe = ehci_readl(ehci, &ehci->regs->frame_index)
		% (ehci->periodic_size << 3);
	if (unlikely(ehci->broken_periodic))
		ehci->last_periodic_enable = ktime_get_real();
	return 0;
}

static int disable_periodic (struct ehci_hcd *ehci)
{
	u32	cmd;
	int	status;

	if (--ehci->periodic_sched)
		return 0;

	if (unlikely(ehci->broken_periodic)) {
		/* delay experimentally determined */
		ktime_t safe = ktime_add_us(ehci->last_periodic_enable, 1000);
		ktime_t now = ktime_get_real();
		s64 delay = ktime_us_delta(safe, now);

		if (unlikely(delay > 0))
			udelay(delay);
	}

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	status = handshake_on_error_set_halt(ehci, &ehci->regs->status,
					     STS_PSS, STS_PSS, 9 * 125);
	if (status)
		return status;

	cmd = ehci_readl(ehci, &ehci->regs->command) & ~CMD_PSE;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	/* posted write ... */

	ehci->next_uframe = -1;
	return 0;
}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period = qh->period;

	dev_dbg (&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
			& (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period) {
		union ehci_shadow	*prev = &ehci->pshadow[i];
		__hc32			*hw_p = &ehci->periodic[i];
		union ehci_shadow	here = *prev;
		__hc32			type = 0;

		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(ehci, *hw_p);
			if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
				break;
			prev = periodic_next_shadow(ehci, prev, type);
			hw_p = shadow_next_periodic(ehci, &here, type);
			here = *prev;
		}

		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw->hw_next = *hw_p;
			wmb ();
			prev->qh = qh;
			*hw_p = QH_NEXT (ehci, qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh->xacterrs = 0;
	qh_get (qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	/* maybe enable periodic schedule processing */
	return enable_periodic(ehci);
}

static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned	i;
	unsigned	period;

	// FIXME:
	// IF this isn't high speed
	//   and this qh is active in the current uframe
	//   (and overlay token SplitXstate is false?)
	// THEN
	//   qh->hw_info1 |= cpu_to_hc32(1 << 7 /* "ignore" */);

	/* high bandwidth, or otherwise part of every microframe */
	if ((period = qh->period) == 0)
		period = 1;

	for (i = qh->start; i < ehci->periodic_size; i += period)
		periodic_unlink (ehci, i, qh);

	/* update per-qh bandwidth for usbfs */
	ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

	dev_dbg (&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put (qh);

	/* maybe turn off periodic schedule */
	return disable_periodic(ehci);
}

static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	unsigned		wait;
	struct ehci_qh_hw	*hw = qh->hw;
	int			rc;

	/* If the QH isn't linked then there's nothing we can do
	 * unless we were called during a giveback, in which case
	 * qh_completions() has to deal with it.
	 */
	if (qh->qh_state != QH_STATE_LINKED) {
		if (qh->qh_state == QH_STATE_COMPLETING)
			qh->needs_rescan = 1;
		return;
	}

	qh_unlink_periodic (ehci, qh);

	/* simple/paranoid:  always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect khubd to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (list_empty (&qh->qtd_list)
			|| (cpu_to_hc32(ehci, QH_CMASK)
					& hw->hw_info2) != 0)
		wait = 2;
	else
		wait = 55;	/* worst case: 3 * 1024 */

	udelay (wait);
	qh->qh_state = QH_STATE_IDLE;
	hw->hw_next = EHCI_LIST_END(ehci);
	wmb ();

	qh_completions(ehci, qh);

	/* reschedule QH iff another request is queued */
	if (!list_empty(&qh->qtd_list) &&
			HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
		rc = qh_schedule(ehci, qh);

		/* An error here likely indicates handshake failure
		 * or no space left in the schedule.  Neither fault
		 * should happen often ...
		 *
		 * FIXME kill the now-dysfunctional queued urbs
		 */
		if (rc != 0)
			ehci_err(ehci, "can't reschedule qh %p, err %d\n",
					qh, rc);
	}
}

/*-------------------------------------------------------------------------*/

static int check_period (
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	period,
	unsigned	usecs
) {
	int		claimed;

	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;

	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;

	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely (period == 0)) {
		do {
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs (ehci, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < ehci->periodic_size);

	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs (ehci, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < ehci->periodic_size);
	}

	// success!
	return 1;
}
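
/* Worked example of the 80% rule above (illustrative numbers): to place
 * a transfer needing 30 usecs, "usecs" becomes 100 - 30 = 70, so any
 * candidate uframe already claiming more than 70 of its 125 usecs makes
 * check_period() fail for that slot.
 */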

static int check_intr_schedule (
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	const struct ehci_qh	*qh,
	__hc32			*c_maskp
)
{
	int		retval = -ENOSPC;
	u8		mask = 0;

	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
		goto done;

	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
		goto done;
	if (!qh->c_usecs) {
		retval = 0;
		*c_maskp = 0;
		goto done;
	}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
	if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
				qh->tt_usecs)) {
		unsigned i;

		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
		for (i=uframe+1; i<8 && i<uframe+4; i++)
			if (!check_period (ehci, frame, i,
						qh->period, qh->c_usecs))
				goto done;
			else
				mask |= 1 << i;

		retval = 0;

		*c_maskp = cpu_to_hc32(ehci, mask << 8);
	}
#else
	/* Make sure this tt's buffer is also available for CSPLITs.
	 * We pessimize a bit; probably the typical full speed case
	 * doesn't need the second CSPLIT.
	 *
	 * NOTE:  both SPLIT and CSPLIT could be checked in just
	 * one smart pass...
	 */
	mask = 0x03 << (uframe + qh->gap_uf);
	*c_maskp = cpu_to_hc32(ehci, mask << 8);

	mask |= 1 << uframe;
	if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
		if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
					qh->period, qh->c_usecs))
			goto done;
		if (!check_period (ehci, frame, uframe + qh->gap_uf,
					qh->period, qh->c_usecs))
			goto done;
		retval = 0;
	}
#endif
done:
	return retval;
}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int		status;
	unsigned	uframe;
	__hc32		c_mask;
	unsigned	frame;		/* 0..(qh->period - 1), or NO_FRAME */
	struct ehci_qh_hw	*hw = qh->hw;

	qh_refresh(ehci, qh);
	hw->hw_next = EHCI_LIST_END(ehci);
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
		status = check_intr_schedule (ehci, frame, --uframe,
				qh, &c_mask);
	} else {
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			int		i;

			for (i = qh->period; status && i > 0; --i) {
				frame = ++ehci->random_frame % qh->period;
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule (ehci,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			}

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
		hw->hw_info2 |= qh->period
			? cpu_to_hc32(ehci, 1 << uframe)
			: cpu_to_hc32(ehci, QH_SMASK);
		hw->hw_info2 |= c_mask;
	} else
		ehci_dbg (ehci, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	status = qh_link_periodic (ehci, qh);
done:
	return status;
}

static int intr_submit (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {
	unsigned		epnum;
	unsigned long		flags;
	struct ehci_qh		*qh;
	int			status;
	struct list_head	empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave (&ehci->lock, flags);

	if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
			       &ehci_to_hcd(ehci)->flags))) {
		status = -ESHUTDOWN;
		goto done_not_linked;
	}
	status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
	if (unlikely(status))
		goto done_not_linked;

	/* get qh and force any scheduling errors */
	INIT_LIST_HEAD (&empty);
	qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		if ((status = qh_schedule (ehci, qh)) != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON (qh == NULL);

	/* ... update usbfs periodic stats */
	ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;

done:
	if (unlikely(status))
		usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
done_not_linked:
	spin_unlock_irqrestore (&ehci->lock, flags);
	if (status)
		qtd_list_free (ehci, urb, qtd_list);

	return status;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc (gfp_t mem_flags)
{
	struct ehci_iso_stream *stream;

	stream = kzalloc(sizeof *stream, mem_flags);
	if (likely (stream != NULL)) {
		INIT_LIST_HEAD(&stream->td_list);
		INIT_LIST_HEAD(&stream->free_list);
		stream->next_uframe = -1;
		stream->refcount = 1;
	}
	return stream;
}

static void
iso_stream_init (
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct usb_device	*dev,
	int			pipe,
	unsigned		interval
)
{
	static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };

	u32			buf1;
	unsigned		epnum, maxp;
	int			is_input;
	long			bandwidth;

	/*
	 * this might be a "high bandwidth" highspeed endpoint,
	 * as encoded in the ep descriptor's wMaxPacket field
	 */
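	/* Worked example: a wMaxPacket of 0x1400 encodes 1024-byte packets
	 * plus two extra transactions per microframe, so hb_mult() below
	 * yields 3, max_packet() yields 1024, and up to 3072 bytes can move
	 * per uframe.
	 */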
	epnum = usb_pipeendpoint (pipe);
	is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
	maxp = usb_maxpacket(dev, pipe, !is_input);
	if (is_input) {
		buf1 = (1 << 11);
	} else {
		buf1 = 0;
	}

	/* knows about ITD vs SITD */
	if (dev->speed == USB_SPEED_HIGH) {
		unsigned multi = hb_mult(maxp);

		stream->highspeed = 1;

		maxp = max_packet(maxp);
		buf1 |= maxp;
		maxp *= multi;

		stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum);
		stream->buf1 = cpu_to_hc32(ehci, buf1);
		stream->buf2 = cpu_to_hc32(ehci, multi);

		/* usbfs wants to report the average usecs per frame tied up
		 * when transfers on this endpoint are scheduled ...
		 */
		stream->usecs = HS_USECS_ISO (maxp);
		bandwidth = stream->usecs * 8;
		bandwidth /= interval;

	} else {
		u32		addr;
		int		think_time;
		int		hs_transfers;

		addr = dev->ttport << 24;
		if (!ehci_is_TDI(ehci)
				|| (dev->tt->hub !=
					ehci_to_hcd(ehci)->self.root_hub))
			addr |= dev->tt->hub->devnum << 16;
		addr |= epnum << 8;
		addr |= dev->devnum;
		stream->usecs = HS_USECS_ISO (maxp);
		think_time = dev->tt ? dev->tt->think_time : 0;
		stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time (
				dev->speed, is_input, 1, maxp));
		hs_transfers = max (1u, (maxp + 187) / 188);
		if (is_input) {
			u32	tmp;

			addr |= 1 << 31;
			stream->c_usecs = stream->usecs;
			stream->usecs = HS_USECS_ISO (1);
			stream->raw_mask = 1;

			/* c-mask as specified in USB 2.0 11.18.4 3.c */
			tmp = (1 << (hs_transfers + 2)) - 1;
			stream->raw_mask |= tmp << (8 + 2);
		} else
			stream->raw_mask = smask_out [hs_transfers - 1];
		bandwidth = stream->usecs + stream->c_usecs;
		bandwidth /= interval << 3;

		/* stream->splits gets created from raw_mask later */
		stream->address = cpu_to_hc32(ehci, addr);
	}
	stream->bandwidth = bandwidth;

	stream->udev = dev;

	stream->bEndpointAddress = is_input | epnum;
	stream->interval = interval;
	stream->maxp = maxp;
}

static void
iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
{
	stream->refcount--;

	/* free whenever just a dev->ep reference remains.
	 * not like a QH -- no persistent state (toggle, halt)
	 */
	if (stream->refcount == 1) {
		int		is_in;

		// BUG_ON (!list_empty(&stream->td_list));

		while (!list_empty (&stream->free_list)) {
			struct list_head	*entry;

			entry = stream->free_list.next;
			list_del (entry);

			/* knows about ITD vs SITD */
			if (stream->highspeed) {
				struct ehci_itd		*itd;

				itd = list_entry (entry, struct ehci_itd,
						itd_list);
				dma_pool_free (ehci->itd_pool, itd,
						itd->itd_dma);
			} else {
				struct ehci_sitd	*sitd;

				sitd = list_entry (entry, struct ehci_sitd,
						sitd_list);
				dma_pool_free (ehci->sitd_pool, sitd,
						sitd->sitd_dma);
			}
		}

		is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
		stream->bEndpointAddress &= 0x0f;
		if (stream->ep)
			stream->ep->hcpriv = NULL;

		if (stream->rescheduled) {
			ehci_info (ehci, "ep%d%s-iso rescheduled "
				"%lu times in %lu seconds\n",
				stream->bEndpointAddress, is_in ? "in" : "out",
				stream->rescheduled,
				((jiffies - stream->start)/HZ)
				);
		}

		kfree(stream);
	}
}

static inline struct ehci_iso_stream *
iso_stream_get (struct ehci_iso_stream *stream)
{
	if (likely (stream != NULL))
		stream->refcount++;
	return stream;
}

static struct ehci_iso_stream *
iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
{
	unsigned		epnum;
	struct ehci_iso_stream	*stream;
	struct usb_host_endpoint *ep;
	unsigned long		flags;

	epnum = usb_pipeendpoint (urb->pipe);
	if (usb_pipein(urb->pipe))
		ep = urb->dev->ep_in[epnum];
	else
		ep = urb->dev->ep_out[epnum];

	spin_lock_irqsave (&ehci->lock, flags);
	stream = ep->hcpriv;

	if (unlikely (stream == NULL)) {
		stream = iso_stream_alloc(GFP_ATOMIC);
		if (likely (stream != NULL)) {
			/* dev->ep owns the initial refcount */
			ep->hcpriv = stream;
			stream->ep = ep;
			iso_stream_init(ehci, stream, urb->dev, urb->pipe,
					urb->interval);
		}

	/* if dev->ep [epnum] is a QH, hw is set */
	} else if (unlikely (stream->hw != NULL)) {
		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
			urb->dev->devpath, epnum,
			usb_pipein(urb->pipe) ? "in" : "out");
		stream = NULL;
	}

	/* caller guarantees an eventual matching iso_stream_put */
	stream = iso_stream_get (stream);

	spin_unlock_irqrestore (&ehci->lock, flags);
	return stream;
}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc (unsigned packets, gfp_t mem_flags)
{
	struct ehci_iso_sched	*iso_sched;
	int			size = sizeof *iso_sched;

	size += packets * sizeof (struct ehci_iso_packet);
	iso_sched = kzalloc(size, mem_flags);
	if (likely (iso_sched != NULL)) {
		INIT_LIST_HEAD (&iso_sched->td_list);
	}
	return iso_sched;
}

static inline void
itd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{
	unsigned	i;
	dma_addr_t	dma = urb->transfer_dma;

	/* how many uframes are needed for these transfers */
	iso_sched->span = urb->number_of_packets * stream->interval;

	/* figure out per-uframe itd fields that we'll need later
	 * when we fit new itds into the schedule.
	 */
	for (i = 0; i < urb->number_of_packets; i++) {
		struct ehci_iso_packet	*uframe = &iso_sched->packet [i];
		unsigned		length;
		dma_addr_t		buf;
		u32			trans;

		length = urb->iso_frame_desc [i].length;
		buf = dma + urb->iso_frame_desc [i].offset;

		trans = EHCI_ISOC_ACTIVE;
		trans |= buf & 0x0fff;
		if (unlikely (((i + 1) == urb->number_of_packets))
				&& !(urb->transfer_flags & URB_NO_INTERRUPT))
			trans |= EHCI_ITD_IOC;
		trans |= length << 16;
		uframe->transaction = cpu_to_hc32(ehci, trans);

		/* might need to cross a buffer page within a uframe */
		uframe->bufp = (buf & ~(u64)0x0fff);
		buf += length;
		if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
			uframe->cross = 1;
	}
}

static void
iso_sched_free (
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{
	if (!iso_sched)
		return;
	// caller must hold ehci->lock!
	list_splice (&iso_sched->td_list, &stream->free_list);
	kfree (iso_sched);
}

static int
itd_urb_transaction (
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{
	struct ehci_itd		*itd;
	dma_addr_t		itd_dma;
	int			i;
	unsigned		num_itds;
	struct ehci_iso_sched	*sched;
	unsigned long		flags;

	sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
	if (unlikely (sched == NULL))
		return -ENOMEM;

	itd_sched_init(ehci, sched, stream, urb);

	if (urb->interval < 8)
		num_itds = 1 + (sched->span + 7) / 8;
	else
		num_itds = urb->number_of_packets;

	/* allocate/init ITDs */
	spin_lock_irqsave (&ehci->lock, flags);
	for (i = 0; i < num_itds; i++) {

		/* free_list.next might be cache-hot ... but maybe
		 * the HC caches it too. avoid that issue for now.
		 */

		/* prefer previously-allocated itds */
		if (likely (!list_empty(&stream->free_list))) {
			itd = list_entry (stream->free_list.prev,
					struct ehci_itd, itd_list);
			list_del (&itd->itd_list);
			itd_dma = itd->itd_dma;
		} else {
			spin_unlock_irqrestore (&ehci->lock, flags);
			itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
					&itd_dma);
			spin_lock_irqsave (&ehci->lock, flags);
			if (!itd) {
				iso_sched_free(stream, sched);
				spin_unlock_irqrestore(&ehci->lock, flags);
				return -ENOMEM;
			}
		}

		memset (itd, 0, sizeof *itd);
		itd->itd_dma = itd_dma;
		list_add (&itd->itd_list, &sched->td_list);
	}
	spin_unlock_irqrestore (&ehci->lock, flags);

	/* temporarily store schedule info in hcpriv */
	urb->hcpriv = sched;
	urb->error_count = 0;
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline int
itd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	u32			uframe,
	u8			usecs,
	u32			period
)
{
	uframe %= period;
	do {
		/* can't commit more than 80% periodic == 100 usec */
		if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
				> (100 - usecs))
			return 0;

		/* we know urb->interval is 2^N uframes */
		uframe += period;
	} while (uframe < mod);
	return 1;
}

static inline int
sitd_slot_ok (
	struct ehci_hcd		*ehci,
	u32			mod,
	struct ehci_iso_stream	*stream,
	u32			uframe,
	struct ehci_iso_sched	*sched,
	u32			period_uframes
)
{
	u32			mask, tmp;
	u32			frame, uf;

	mask = stream->raw_mask << (uframe & 7);

	/* for IN, don't wrap CSPLIT into the next frame */
	if (mask & ~0xffff)
		return 0;

	/* this multi-pass logic is simple, but performance may
	 * suffer when the schedule data isn't cached.
	 */

	/* check bandwidth */
	uframe %= period_uframes;
	do {
		u32		max_used;

		frame = uframe >> 3;
		uf = uframe & 7;

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
		/* The tt's fullspeed bus bandwidth must be available.
		 * tt_available scheduling guarantees 10+% for control/bulk.
		 */
		if (!tt_available (ehci, period_uframes << 3,
				stream->udev, frame, uf, stream->tt_usecs))
			return 0;
#else
		/* tt must be idle for start(s), any gap, and csplit.
		 * assume scheduling slop leaves 10+% for control/bulk.
		 */
		if (!tt_no_collision (ehci, period_uframes << 3,
				stream->udev, frame, mask))
			return 0;
#endif

		/* check starts (OUT uses more than one) */
		max_used = 100 - stream->usecs;
		for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) {
			if (periodic_usecs (ehci, frame, uf) > max_used)
				return 0;
		}

		/* for IN, check CSPLIT */
		if (stream->c_usecs) {
			uf = uframe & 7;
			max_used = 100 - stream->c_usecs;
			do {
				tmp = 1 << uf;
				tmp <<= 8;
				if ((stream->raw_mask & tmp) == 0)
					continue;
				if (periodic_usecs (ehci, frame, uf)
						> max_used)
					return 0;
			} while (++uf < 8);
		}

		/* we know urb->interval is 2^N uframes */
		uframe += period_uframes;
	} while (uframe < mod);

	stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7));
	return 1;
}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */

#define SCHEDULE_SLOP	80	/* microframes */
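/* 80 uframes * 125 usecs each = 10 msec of scheduling slop */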

static int
iso_stream_schedule (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{
	u32			now, next, start, period;
	int			status;
	unsigned		mod = ehci->periodic_size << 3;
	struct ehci_iso_sched	*sched = urb->hcpriv;
	struct pci_dev		*pdev;

	if (sched->span > (mod - SCHEDULE_SLOP)) {
		ehci_dbg (ehci, "iso request %p too long\n", urb);
		status = -EFBIG;
		goto fail;
	}

	if ((stream->depth + sched->span) > mod) {
		ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n",
			urb, stream->depth, sched->span, mod);
		status = -EFBIG;
		goto fail;
	}

	period = urb->interval;
	if (!stream->highspeed)
		period <<= 3;

	now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;

	/* Typical case: reuse current schedule, stream is still active.
	 * Hopefully there are no gaps from the host falling behind
	 * (irq delays etc), but if there are we'll take the next
	 * slot in the schedule, implicitly assuming URB_ISO_ASAP.
	 */
	if (likely (!list_empty (&stream->td_list))) {
		pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
		start = stream->next_uframe;

		/* For high speed devices, allow scheduling within the
		 * isochronous scheduling threshold.  For full speed devices,
		 * don't. (Work around for Intel ICH9 bug.)
		 */
		if (!stream->highspeed &&
				pdev->vendor == PCI_VENDOR_ID_INTEL)
			next = now + ehci->i_thresh;
		else
			next = now;

		/* Fell behind (by up to twice the slop amount)? */
		if (((start - next) & (mod - 1)) >=
				mod - 2 * SCHEDULE_SLOP)
			start += period * DIV_ROUND_UP(
					(next - start) & (mod - 1),
					period);
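
		/* Note: ehci->periodic_size is a power of two, so the
		 * "& (mod - 1)" in the two expressions above is simply
		 * arithmetic modulo the schedule length, in uframes.
		 */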

		/* Tried to schedule too far into the future? */
		if (unlikely(((start - now) & (mod - 1)) + sched->span
				>= mod - 2 * SCHEDULE_SLOP)) {
			status = -EFBIG;
			goto fail;
		}
		stream->next_uframe = start;
		goto ready;
	}

	/* need to schedule; when's the next (u)frame we could start?
	 * this is bigger than ehci->i_thresh allows; scheduling itself
	 * isn't free, the slop should handle reasonably slow cpus.  it
	 * can also help high bandwidth if the dma and irq loads don't
	 * jump until after the queue is primed.
	 */
	start = SCHEDULE_SLOP + (now & ~0x07);
	start %= mod;
	stream->next_uframe = start;

	/* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */

	/* find a uframe slot with enough bandwidth */
	for (; start < (stream->next_uframe + period); start++) {
		int		enough_space;

		/* check schedule: enough space? */
		if (stream->highspeed)
			enough_space = itd_slot_ok (ehci, mod, start,
					stream->usecs, period);
		else {
			if ((start % 8) >= 6)
				continue;
			enough_space = sitd_slot_ok (ehci, mod, stream,
					start, sched, period);
		}

		/* schedule it here if there's enough bandwidth */
		if (enough_space) {
			stream->next_uframe = start % mod;
			goto ready;
		}
	}

	/* no room in the schedule */
	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
		list_empty (&stream->td_list) ? "" : "re",
		urb, now, now + mod);
	status = -ENOSPC;

fail:
	iso_sched_free (stream, sched);
	urb->hcpriv = NULL;
	return status;

ready:
	/* report high speed start in uframes; full speed, in frames */
	urb->start_frame = stream->next_uframe;
	if (!stream->highspeed)
		urb->start_frame >>= 3;
	return 0;
}

/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{
	int i;

	/* it's been recently zeroed */
	itd->hw_next = EHCI_LIST_END(ehci);
	itd->hw_bufp [0] = stream->buf0;
	itd->hw_bufp [1] = stream->buf1;
	itd->hw_bufp [2] = stream->buf2;

	for (i = 0; i < 8; i++)
		itd->index[i] = -1;

	/* All other fields are filled when scheduling */
}

static inline void
itd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{
	struct ehci_iso_packet	*uf = &iso_sched->packet [index];
	unsigned		pg = itd->pg;

	// BUG_ON (pg == 6 && uf->cross);

	uframe &= 0x07;
	itd->index [uframe] = index;

	itd->hw_transaction[uframe] = uf->transaction;
	itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
	itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0);
	itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));

	/* iso_frame_desc[].offset must be strictly increasing */
	if (unlikely (uf->cross)) {
		u64	bufp = uf->bufp + 4096;

		itd->pg = ++pg;
		itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0);
		itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32));
	}
}
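
/* Worked example of the page-cross case above (illustrative numbers):
 * a packet whose buffer starts at offset 0xf80 within a 4 KB page and
 * carries 256 bytes ends at 0x1080, in the next page; itd_sched_init()
 * flagged that with uf->cross, and itd_patch() programs the following
 * buffer-pointer slot with that next page.
 */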
1562 :
1563 : static inline void
1564 : itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
1565 : {
1566 2 : union ehci_shadow *prev = &ehci->pshadow[frame];
1567 2 : __hc32 *hw_p = &ehci->periodic[frame];
1568 2 : union ehci_shadow here = *prev;
1569 2 : __hc32 type = 0;
1570 1 :
1571 1 : /* skip any iso nodes which might belong to previous microframes */
1572 3 : while (here.ptr) {
1573 4 : type = Q_NEXT_TYPE(ehci, *hw_p);
1574 5 : if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
1575 1 : break;
1576 2 : prev = periodic_next_shadow(ehci, prev, type);
1577 2 : hw_p = shadow_next_periodic(ehci, &here, type);
1578 2 : here = *prev;
1579 : }
1580 1 :
1581 2 : itd->itd_next = here;
1582 2 : itd->hw_next = *hw_p;
1583 2 : prev->itd = itd;
1584 2 : itd->frame = frame;
1585 2 : wmb ();
1586 6 : *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
1587 1 : }
1588 :
1589 : /* fit urb's itds into the selected schedule slot; activate as needed */
1590 : static int
1591 : itd_link_urb (
1592 : struct ehci_hcd *ehci,
1593 : struct urb *urb,
1594 1 : unsigned mod,
1595 1 : struct ehci_iso_stream *stream
1596 1 : )
1597 1 : {
1598 1 : int packet;
1599 1 : unsigned next_uframe, uframe, frame;
1600 3 : struct ehci_iso_sched *iso_sched = urb->hcpriv;
1601 1 : struct ehci_itd *itd;
1602 1 :
1603 3 : next_uframe = stream->next_uframe % mod;
1604 1 :
1605 7 : if (unlikely (list_empty(&stream->td_list))) {
1606 1 : ehci_to_hcd(ehci)->self.bandwidth_allocated
1607 5 : += stream->bandwidth;
1608 : ehci_vdbg (ehci,
1609 : "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
1610 : urb->dev->devpath, stream->bEndpointAddress & 0x0f,
1611 : (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
1612 : urb->interval,
1613 : next_uframe >> 3, next_uframe & 0x7);
1614 1 : stream->start = jiffies;
1615 : }
1616 5 : ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
1617 :
1618 : /* fill iTDs uframe by uframe */
1619 5 : for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) {
1620 3 : if (itd == NULL) {
1621 1 : /* ASSERT: we have all necessary itds */
1622 : // BUG_ON (list_empty (&iso_sched->td_list));
1623 :
1624 : /* ASSERT: no itds for this endpoint in this uframe */
1625 :
1626 2 : itd = list_entry (iso_sched->td_list.next,
1627 : struct ehci_itd, itd_list);
1628 2 : list_move_tail (&itd->itd_list, &stream->td_list);
1629 2 : itd->stream = iso_stream_get (stream);
1630 1 : itd->urb = urb;
1631 2 : itd_init (ehci, stream, itd);
1632 : }
1633 :
1634 2 : uframe = next_uframe & 0x07;
1635 2 : frame = next_uframe >> 3;
1636 :
1637 6 : itd_patch(ehci, itd, iso_sched, packet, uframe);
1638 :
1639 2 : next_uframe += stream->interval;
1640 3 : stream->depth += stream->interval;
1641 1 : next_uframe %= mod;
1642 1 : packet++;
1643 :
1644 : /* link completed itds into the schedule */
1645 4 : if (((next_uframe >> 3) != frame)
1646 : || packet == urb->number_of_packets) {
1647 2 : itd_link (ehci, frame % ehci->periodic_size, itd);
1648 1 : itd = NULL;
1649 : }
1650 : }
1651 1 : stream->next_uframe = next_uframe;
1652 :
1653 2 : /* don't need that schedule data any more */
1654 3 : iso_sched_free (stream, iso_sched);
1655 1 : urb->hcpriv = NULL;
1656 :
1657 4 : timer_action (ehci, TIMER_IO_WATCHDOG);
1658 5 : return enable_periodic(ehci);
1659 : }
1660 :
1661 : #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
1662 :
1663 : /* Process and recycle a completed ITD. Return true iff its urb completed,
1664 : * and hence its completion callback probably added things to the hardware
1665 : * schedule.
1666 : *
1667 : * Note that we carefully avoid recycling this descriptor until after any
1668 : * completion callback runs, so that it won't be reused quickly. That is,
1669 : * assuming (a) no more than two urbs per frame on this endpoint, and also
1670 : * (b) only this endpoint's completions submit URBs. It seems some silicon
1671 : * corrupts things if you reuse completed descriptors very quickly...
1672 : */
1673 : static unsigned
1674 : itd_complete (
1675 : struct ehci_hcd *ehci,
1676 3 : struct ehci_itd *itd
1677 3 : ) {
1678 6 : struct urb *urb = itd->urb;
1679 3 : struct usb_iso_packet_descriptor *desc;
1680 3 : u32 t;
1681 3 : unsigned uframe;
1682 6 : int urb_index = -1;
1683 6 : struct ehci_iso_stream *stream = itd->stream;
1684 3 : struct usb_device *dev;
1685 6 : unsigned retval = false;
1686 3 :
1687 3 : /* for each uframe with a packet */
1688 24 : for (uframe = 0; uframe < 8; uframe++) {
1689 27 : if (likely (itd->index[uframe] == -1))
1690 9 : continue;
1691 6 : urb_index = itd->index[uframe];
1692 6 : desc = &urb->iso_frame_desc [urb_index];
1693 :
1694 6 : t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
1695 3 : itd->hw_transaction [uframe] = 0;
1696 9 : stream->depth -= stream->interval;
1697 :
1698 : /* report transfer status */
1699 12 : if (unlikely (t & ISO_ERRS)) {
1700 3 : urb->error_count++;
1701 6 : if (t & EHCI_ISOC_BUF_ERR)
1702 18 : desc->status = usb_pipein (urb->pipe)
1703 : ? -ENOSR /* hc couldn't read */
1704 : : -ECOMM; /* hc couldn't write */
1705 6 : else if (t & EHCI_ISOC_BABBLE)
1706 3 : desc->status = -EOVERFLOW;
1707 : else /* (t & EHCI_ISOC_XACTERR) */
1708 3 : desc->status = -EPROTO;
1709 :
1710 : /* HC need not update length with this error */
1711 6 : if (!(t & EHCI_ISOC_BABBLE)) {
1712 3 : desc->actual_length = EHCI_ITD_LENGTH(t);
1713 3 : urb->actual_length += desc->actual_length;
1714 : }
1715 12 : } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
1716 3 : desc->status = 0;
1717 3 : desc->actual_length = EHCI_ITD_LENGTH(t);
1718 3 : urb->actual_length += desc->actual_length;
1719 : } else {
1720 : /* URB was too late */
1721 3 : desc->status = -EXDEV;
1722 : }
1723 : }
1724 6 :
1725 : /* handle completion now? */
1726 12 : if (likely ((urb_index + 1) != urb->number_of_packets))
1727 3 : goto done;
1728 :
1729 : /* ASSERT: it's really the last itd for this urb
1730 : list_for_each_entry (itd, &stream->td_list, itd_list)
1731 : BUG_ON (itd->urb == urb);
1732 : */
1733 :
1734 : /* give urb back to the driver; completion often (re)submits */
1735 3 : dev = urb->dev;
1736 6 : ehci_urb_done(ehci, urb, 0);
1737 3 : retval = true;
1738 3 : urb = NULL;
1739 12 : (void) disable_periodic(ehci);
1740 9 : ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
1741 :
1742 18 : if (unlikely(list_is_singular(&stream->td_list))) {
1743 : ehci_to_hcd(ehci)->self.bandwidth_allocated
1744 15 : -= stream->bandwidth;
1745 : ehci_vdbg (ehci,
1746 : "deschedule devp %s ep%d%s-iso\n",
1747 : dev->devpath, stream->bEndpointAddress & 0x0f,
1748 : (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
1749 : }
1750 24 : iso_stream_put (ehci, stream);
1751 :
1752 3 : done:
1753 6 : itd->urb = NULL;
1754 24 : if (ehci->clock_frame != itd->frame || itd->index[7] != -1) {
1755 : /* OK to recycle this ITD now. */
1756 6 : itd->stream = NULL;
1757 12 : list_move(&itd->itd_list, &stream->free_list);
1758 12 : iso_stream_put(ehci, stream);
1759 : } else {
1760 : /* HW might remember this ITD, so we can't recycle it yet.
1761 : * Move it to a safe place until a new frame starts.
1762 : */
1763 12 : list_move(&itd->itd_list, &ehci->cached_itd_list);
1764 6 : if (stream->refcount == 2) {
1765 : /* If iso_stream_put() were called here, stream
1766 : * would be freed. Instead, just prevent reuse.
1767 : */
1768 3 : stream->ep->hcpriv = NULL;
1769 3 : stream->ep = NULL;
1770 : }
1771 : }
1772 6 : return retval;
1773 : }
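
/* A standalone sketch (not driver code) of the recycle test above: the
 * controller may still hold a cached pointer to an ITD in the current
 * frame, so the ITD may be freed immediately only when the clock has
 * left its frame, or when it carried a packet in uframe 7 (by completion
 * time the HC is already past its final uframe). "itd_model" is an
 * illustrative stand-in for struct ehci_itd.
 */
#include <stdbool.h>

struct itd_model {
	unsigned	frame;		/* frame this ITD was linked into */
	int		index[8];	/* per-uframe packet index, -1 if unused */
};

static bool itd_safe_to_recycle(unsigned clock_frame,
		const struct itd_model *itd)
{
	return clock_frame != itd->frame || itd->index[7] != -1;
}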
1774 :
1775 : /*-------------------------------------------------------------------------*/
1776 :
1777 : static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
1778 : gfp_t mem_flags)
1779 1 : {
1780 2 : int status = -EINVAL;
1781 1 : unsigned long flags;
1782 1 : struct ehci_iso_stream *stream;
1783 1 :
1784 1 : /* Get iso_stream head */
1785 3 : stream = iso_stream_find (ehci, urb);
1786 5 : if (unlikely (stream == NULL)) {
1787 1 : ehci_dbg (ehci, "can't get iso stream\n");
1788 2 : return -ENOMEM;
1789 1 : }
1790 6 : if (unlikely (urb->interval != stream->interval)) {
1791 1 : ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
1792 1 : stream->interval, urb->interval);
1793 2 : goto done;
1794 : }
1795 :
1796 : #ifdef EHCI_URB_TRACE
1797 : ehci_dbg (ehci,
1798 : "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1799 : __func__, urb->dev->devpath, urb,
1800 : usb_pipeendpoint (urb->pipe),
1801 : usb_pipein (urb->pipe) ? "in" : "out",
1802 : urb->transfer_buffer_length,
1803 : urb->number_of_packets, urb->interval,
1804 : stream);
1805 : #endif
1806 :
1807 : /* allocate ITDs w/o locking anything */
1808 4 : status = itd_urb_transaction (stream, ehci, urb, mem_flags);
1809 4 : if (unlikely (status < 0)) {
1810 : ehci_dbg (ehci, "can't init itds\n");
1811 1 : goto done;
1812 : }
1813 :
1814 : /* schedule ... need to lock */
1815 3 : spin_lock_irqsave (&ehci->lock, flags);
1816 8 : if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
1817 : &ehci_to_hcd(ehci)->flags))) {
1818 1 : status = -ESHUTDOWN;
1819 1 : goto done_not_linked;
1820 : }
1821 3 : status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
1822 4 : if (unlikely(status))
1823 1 : goto done_not_linked;
1824 5 : status = iso_stream_schedule(ehci, urb, stream);
1825 4 : if (likely (status == 0))
1826 2 : itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
1827 : else
1828 3 : usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
1829 : done_not_linked:
1830 8 : spin_unlock_irqrestore (&ehci->lock, flags);
1831 2 :
1832 1 : done:
1833 8 : if (unlikely (status < 0))
1834 4 : iso_stream_put (ehci, stream);
1835 2 : return status;
1836 : }
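
/* Sketch of the shape shared by itd_submit() above and sitd_submit()
 * below: allocate descriptors before taking the lock (allocation may
 * sleep), do only the link and schedule steps under the lock, and unwind
 * in reverse order on failure. A pthread mutex stands in for ehci->lock;
 * link_ep(), schedule_urb() and unlink_ep() are illustrative stand-ins,
 * not driver functions.
 */
#include <pthread.h>

static pthread_mutex_t submit_lock = PTHREAD_MUTEX_INITIALIZER;

static int link_ep(void)      { return 0; }
static int schedule_urb(void) { return 0; }
static void unlink_ep(void)   { }

static int submit_sketch(void)
{
	int status;

	pthread_mutex_lock(&submit_lock);
	status = link_ep();
	if (status)
		goto done_not_linked;
	status = schedule_urb();
	if (status)
		unlink_ep();		/* undo the link on failure */
done_not_linked:
	pthread_mutex_unlock(&submit_lock);
	return status;
}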
1837 :
1838 : /*-------------------------------------------------------------------------*/
1839 :
1840 : /*
1841 : * "Split ISO TDs" ... used for USB 1.1 devices going through the
1842 : * TTs in USB 2.0 hubs. These need microframe scheduling.
1843 : */
1844 :
1845 : static inline void
1846 : sitd_sched_init(
1847 : struct ehci_hcd *ehci,
1848 : struct ehci_iso_sched *iso_sched,
1849 1 : struct ehci_iso_stream *stream,
1850 1 : struct urb *urb
1851 1 : )
1852 1 : {
1853 1 : unsigned i;
1854 2 : dma_addr_t dma = urb->transfer_dma;
1855 :
1856 : /* how many frames are needed for these transfers */
1857 2 : iso_sched->span = urb->number_of_packets * stream->interval;
1858 :
1859 : /* figure out per-frame sitd fields that we'll need later
1860 : * when we fit new sitds into the schedule.
1861 : */
1862 6 : for (i = 0; i < urb->number_of_packets; i++) {
1863 3 : struct ehci_iso_packet *packet = &iso_sched->packet [i];
1864 1 : unsigned length;
1865 : dma_addr_t buf;
1866 : u32 trans;
1867 :
1868 1 : length = urb->iso_frame_desc [i].length & 0x03ff;
1869 2 : buf = dma + urb->iso_frame_desc [i].offset;
1870 :
1871 1 : trans = SITD_STS_ACTIVE;
1872 5 : if (((i + 1) == urb->number_of_packets)
1873 : && !(urb->transfer_flags & URB_NO_INTERRUPT))
1874 1 : trans |= SITD_IOC;
1875 1 : trans |= length << 16;
1876 2 : packet->transaction = cpu_to_hc32(ehci, trans);
1877 :
1878 : /* might need to cross a buffer page within a td */
1879 1 : packet->bufp = buf;
1880 1 : packet->buf1 = (buf + length) & ~0x0fff;
1881 3 : if (packet->buf1 != (buf & ~(u64)0x0fff))
1882 1 : packet->cross = 1;
1883 :
1884 : /* OUT uses multiple start-splits */
1885 3 : if (stream->bEndpointAddress & USB_DIR_IN)
1886 1 : continue;
1887 1 : length = (length + 187) / 188;
1888 2 : if (length > 1) /* BEGIN vs ALL */
1889 1 : length |= 1 << 3;
1890 1 : packet->buf1 |= length;
1891 1 : }
1892 : }
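
/* A quick userspace check of the start-split arithmetic above. Full-speed
 * ISO packets are at most 1023 bytes (hence the 0x03ff mask), and OUT
 * data crosses the TT in chunks of at most 188 bytes per start-split, so
 * "(length + 187) / 188" is a ceiling division; when more than one split
 * is needed, the code marks BEGIN rather than ALL (length |= 1 << 3).
 */
#include <assert.h>

static unsigned out_start_splits(unsigned length)
{
	return (length + 187) / 188;	/* mirrors the line above */
}

int main(void)
{
	assert(out_start_splits(1)    == 1);
	assert(out_start_splits(188)  == 1);	/* exactly one chunk */
	assert(out_start_splits(189)  == 2);	/* spills into a second */
	assert(out_start_splits(1023) == 6);	/* max FS iso packet */
	return 0;
}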
1893 :
1894 : static int
1895 : sitd_urb_transaction (
1896 : struct ehci_iso_stream *stream,
1897 : struct ehci_hcd *ehci,
1898 1 : struct urb *urb,
1899 1 : gfp_t mem_flags
1900 1 : )
1901 1 : {
1902 1 : struct ehci_sitd *sitd;
1903 1 : dma_addr_t sitd_dma;
1904 1 : int i;
1905 1 : struct ehci_iso_sched *iso_sched;
1906 1 : unsigned long flags;
1907 1 :
1908 4 : iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
1909 2 : if (iso_sched == NULL)
1910 1 : return -ENOMEM;
1911 :
1912 2 : sitd_sched_init(ehci, iso_sched, stream, urb);
1913 :
1914 : /* allocate/init sITDs */
1915 3 : spin_lock_irqsave (&ehci->lock, flags);
1916 5 : for (i = 0; i < urb->number_of_packets; i++) {
1917 1 :
1918 2 : /* NOTE: for now, we don't try to handle wraparound cases
1919 : * for IN (using sitd->hw_backpointer, like a FSTN), which
1920 : * means we never need two sitds for full speed packets.
1921 : */
1922 :
1923 : /* free_list.next might be cache-hot ... but maybe
1924 : * the HC caches it too. avoid that issue for now.
1925 : */
1926 :
1927 : /* prefer previously-allocated sitds */
1928 4 : if (!list_empty(&stream->free_list)) {
1929 2 : sitd = list_entry (stream->free_list.prev,
1930 : struct ehci_sitd, sitd_list);
1931 2 : list_del (&sitd->sitd_list);
1932 1 : sitd_dma = sitd->sitd_dma;
1933 : } else {
1934 2 : spin_unlock_irqrestore (&ehci->lock, flags);
1935 2 : sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
1936 : &sitd_dma);
1937 3 : spin_lock_irqsave (&ehci->lock, flags);
1938 2 : if (!sitd) {
1939 3 : iso_sched_free(stream, iso_sched);
1940 2 : spin_unlock_irqrestore(&ehci->lock, flags);
1941 1 : return -ENOMEM;
1942 : }
1943 : }
1944 :
1945 2 : memset (sitd, 0, sizeof *sitd);
1946 2 : sitd->sitd_dma = sitd_dma;
1947 4 : list_add (&sitd->sitd_list, &iso_sched->td_list);
1948 : }
1949 :
1950 : /* temporarily store schedule info in hcpriv */
1951 1 : urb->hcpriv = iso_sched;
1952 1 : urb->error_count = 0;
1953 :
1954 2 : spin_unlock_irqrestore (&ehci->lock, flags);
1955 1 : return 0;
1956 : }
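
/* Sketch (with a pthread mutex standing in for ehci->lock) of the
 * drop-allocate-retake pattern above: a spinlock cannot be held across a
 * blocking allocation, so the lock is released around dma_pool_alloc()
 * and anything observed under the lock beforehand must be revalidated
 * afterwards -- in the driver, the free list is re-checked on every loop
 * pass. malloc() is a stand-in for the DMA pool allocator.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* call with pool_lock held; returns with it held again */
static void *alloc_unlocked(size_t size)
{
	void *p;

	pthread_mutex_unlock(&pool_lock);	/* allocation may sleep */
	p = malloc(size);			/* stand-in for dma_pool_alloc */
	pthread_mutex_lock(&pool_lock);		/* state may have changed */
	return p;
}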
1957 :
1958 : /*-------------------------------------------------------------------------*/
1959 :
1960 : static inline void
1961 : sitd_patch(
1962 : struct ehci_hcd *ehci,
1963 : struct ehci_iso_stream *stream,
1964 : struct ehci_sitd *sitd,
1965 1 : struct ehci_iso_sched *iso_sched,
1966 1 : unsigned index
1967 : )
1968 : {
1969 1 : struct ehci_iso_packet *uf = &iso_sched->packet [index];
1970 1 : u64 bufp = uf->bufp;
1971 :
1972 2 : sitd->hw_next = EHCI_LIST_END(ehci);
1973 1 : sitd->hw_fullspeed_ep = stream->address;
1974 1 : sitd->hw_uframe = stream->splits;
1975 1 : sitd->hw_results = uf->transaction;
1976 2 : sitd->hw_backpointer = EHCI_LIST_END(ehci);
1977 :
1978 1 : bufp = uf->bufp;
1979 2 : sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp);
1980 2 : sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32);
1981 :
1982 2 : sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1);
1983 3 : if (uf->cross)
1984 1 : bufp += 4096;
1985 2 : sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32);
1986 1 : sitd->index = index;
1987 1 : }
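
/* Standalone check of the page-crossing test computed in sitd_sched_init()
 * and consumed above: a transfer crosses a 4 KiB boundary when the page
 * base of (buf + length) differs from the page base of buf. Since
 * buf + length is one past the final byte, a transfer ending exactly on a
 * page boundary still counts as a cross; that is harmless, as the second
 * buffer pointer is programmed but carries no data bytes.
 */
#include <assert.h>
#include <stdint.h>

static int crosses_page(uint64_t buf, unsigned length)
{
	return ((buf + length) & ~(uint64_t)0x0fff) !=
			(buf & ~(uint64_t)0x0fff);
}

int main(void)
{
	assert(!crosses_page(0x1000, 100));	/* fits in one page */
	assert(crosses_page(0x1f80, 0x100));	/* spills into the next */
	assert(crosses_page(0x1000, 0x1000));	/* ends exactly on boundary */
	return 0;
}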
1988 :
1989 : static inline void
1990 : sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
1991 : {
1992 : /* note: sitd ordering could matter (CSPLIT then SSPLIT) */
1993 1 : sitd->sitd_next = ehci->pshadow [frame];
1994 1 : sitd->hw_next = ehci->periodic [frame];
1995 1 : ehci->pshadow [frame].sitd = sitd;
1996 1 : sitd->frame = frame;
1997 1 : wmb ();
1998 3 : ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
1999 1 : }
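
/* The wmb() above orders descriptor initialization against the store
 * that makes it reachable: the HC must never follow the new link before
 * the sITD contents are visible. A rough userspace analogue using C11
 * release semantics, with a consumer thread standing in for the
 * controller's DMA engine ("slot" and "periodic_link" are illustrative):
 */
#include <stdatomic.h>
#include <stdint.h>

struct desc { uint32_t payload; };

static struct desc slot;
static _Atomic uintptr_t periodic_link;	/* 0 plays the role of list end */

static void publish(uint32_t payload)
{
	slot.payload = payload;			/* initialize first ... */
	atomic_store_explicit(&periodic_link,	/* ... then publish */
			(uintptr_t)&slot, memory_order_release);
}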
2000 :
2001 : /* fit urb's sitds into the selected schedule slot; activate as needed */
2002 : static int
2003 : sitd_link_urb (
2004 : struct ehci_hcd *ehci,
2005 : struct urb *urb,
2006 1 : unsigned mod,
2007 1 : struct ehci_iso_stream *stream
2008 1 : )
2009 1 : {
2010 1 : int packet;
2011 1 : unsigned next_uframe;
2012 3 : struct ehci_iso_sched *sched = urb->hcpriv;
2013 1 : struct ehci_sitd *sitd;
2014 1 :
2015 3 : next_uframe = stream->next_uframe;
2016 1 :
2017 5 : if (list_empty(&stream->td_list)) {
2018 : /* usbfs ignores TT bandwidth */
2019 : ehci_to_hcd(ehci)->self.bandwidth_allocated
2020 5 : += stream->bandwidth;
2021 : ehci_vdbg (ehci,
2022 : "sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
2023 : urb->dev->devpath, stream->bEndpointAddress & 0x0f,
2024 : (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
2025 : (next_uframe >> 3) % ehci->periodic_size,
2026 : stream->interval, hc32_to_cpu(ehci, stream->splits));
2027 1 : stream->start = jiffies;
2028 : }
2029 5 : ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++;
2030 :
2031 : /* fill sITDs frame by frame */
2032 5 : for (packet = 0, sitd = NULL;
2033 1 : packet < urb->number_of_packets;
2034 1 : packet++) {
2035 1 :
2036 1 : /* ASSERT: we have all necessary sitds */
2037 8 : BUG_ON (list_empty (&sched->td_list));
2038 :
2039 : /* ASSERT: no itds for this endpoint in this frame */
2040 :
2041 2 : sitd = list_entry (sched->td_list.next,
2042 : struct ehci_sitd, sitd_list);
2043 2 : list_move_tail (&sitd->sitd_list, &stream->td_list);
2044 2 : sitd->stream = iso_stream_get (stream);
2045 1 : sitd->urb = urb;
2046 :
2047 2 : sitd_patch(ehci, stream, sitd, sched, packet);
2048 2 : sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size,
2049 : sitd);
2050 :
2051 2 : next_uframe += stream->interval << 3;
2052 3 : stream->depth += stream->interval << 3;
2053 : }
2054 1 : stream->next_uframe = next_uframe % mod;
2055 :
2056 : /* don't need that schedule data any more */
2057 3 : iso_sched_free (stream, sched);
2058 1 : urb->hcpriv = NULL;
2059 :
2060 4 : timer_action (ehci, TIMER_IO_WATCHDOG);
2061 5 : return enable_periodic(ehci);
2062 : }
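
/* The periodic cursor counts microframes, eight per frame, so a
 * full-speed interval of N frames advances next_uframe by N << 3 and
 * wraps modulo the schedule span (periodic_size << 3), as in the loop
 * above. A standalone check, assuming the default 1024-frame schedule:
 */
#include <assert.h>

int main(void)
{
	unsigned periodic_size = 1024;		/* frames */
	unsigned mod = periodic_size << 3;	/* span in uframes: 8192 */
	unsigned interval = 4;			/* sITD interval, frames */
	unsigned next_uframe = mod - 8;		/* frame 1023, uframe 0 */

	next_uframe = (next_uframe + (interval << 3)) % mod;
	assert(next_uframe == 24);		/* wrapped to frame 3 */
	return 0;
}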
2063 :
2064 : /*-------------------------------------------------------------------------*/
2065 :
2066 : #define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
2067 : | SITD_STS_XACT | SITD_STS_MMF)
2068 :
2069 : /* Process and recycle a completed SITD. Return true iff its urb completed,
2070 : * and hence its completion callback probably added things to the hardware
2071 : * schedule.
2072 : *
2073 : * Note that we carefully avoid recycling this descriptor until after any
2074 : * completion callback runs, so that it won't be reused quickly. That is,
2075 : * assuming (a) no more than two urbs per frame on this endpoint, and also
2076 : * (b) only this endpoint's completions submit URBs. It seems some silicon
2077 : * corrupts things if you reuse completed descriptors very quickly...
2078 : */
2079 : static unsigned
2080 : sitd_complete (
2081 : struct ehci_hcd *ehci,
2082 3 : struct ehci_sitd *sitd
2083 3 : ) {
2084 6 : struct urb *urb = sitd->urb;
2085 3 : struct usb_iso_packet_descriptor *desc;
2086 3 : u32 t;
2087 6 : int urb_index = -1;
2088 6 : struct ehci_iso_stream *stream = sitd->stream;
2089 3 : struct usb_device *dev;
2090 6 : unsigned retval = false;
2091 3 :
2092 9 : urb_index = sitd->index;
2093 3 : desc = &urb->iso_frame_desc [urb_index];
2094 6 : t = hc32_to_cpup(ehci, &sitd->hw_results);
2095 :
2096 : /* report transfer status */
2097 6 : if (t & SITD_ERRS) {
2098 3 : urb->error_count++;
2099 6 : if (t & SITD_STS_DBE)
2100 18 : desc->status = usb_pipein (urb->pipe)
2101 : ? -ENOSR /* hc couldn't read */
2102 : : -ECOMM; /* hc couldn't write */
2103 6 : else if (t & SITD_STS_BABBLE)
2104 3 : desc->status = -EOVERFLOW;
2105 : else /* XACT, MMF, etc */
2106 3 : desc->status = -EPROTO;
2107 : } else {
2108 3 : desc->status = 0;
2109 3 : desc->actual_length = desc->length - SITD_LENGTH(t);
2110 3 : urb->actual_length += desc->actual_length;
2111 : }
2112 9 : stream->depth -= stream->interval << 3;
2113 :
2114 : /* handle completion now? */
2115 6 : if ((urb_index + 1) != urb->number_of_packets)
2116 3 : goto done;
2117 :
2118 : /* ASSERT: it's really the last sitd for this urb
2119 : list_for_each_entry (sitd, &stream->td_list, sitd_list)
2120 : BUG_ON (sitd->urb == urb);
2121 : */
2122 :
2123 : /* give urb back to the driver; completion often (re)submits */
2124 3 : dev = urb->dev;
2125 6 : ehci_urb_done(ehci, urb, 0);
2126 3 : retval = true;
2127 3 : urb = NULL;
2128 12 : (void) disable_periodic(ehci);
2129 9 : ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--;
2130 :
2131 12 : if (list_is_singular(&stream->td_list)) {
2132 : ehci_to_hcd(ehci)->self.bandwidth_allocated
2133 15 : -= stream->bandwidth;
2134 : ehci_vdbg (ehci,
2135 : "deschedule devp %s ep%d%s-iso\n",
2136 : dev->devpath, stream->bEndpointAddress & 0x0f,
2137 : (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
2138 : }
2139 24 : iso_stream_put (ehci, stream);
2140 :
2141 3 : done:
2142 6 : sitd->urb = NULL;
2143 12 : if (ehci->clock_frame != sitd->frame) {
2144 : /* OK to recycle this SITD now. */
2145 6 : sitd->stream = NULL;
2146 12 : list_move(&sitd->sitd_list, &stream->free_list);
2147 12 : iso_stream_put(ehci, stream);
2148 : } else {
2149 : /* HW might remember this SITD, so we can't recycle it yet.
2150 : * Move it to a safe place until a new frame starts.
2151 : */
2152 12 : list_move(&sitd->sitd_list, &ehci->cached_sitd_list);
2153 6 : if (stream->refcount == 2) {
2154 : /* If iso_stream_put() were called here, stream
2155 : * would be freed. Instead, just prevent reuse.
2156 : */
2157 3 : stream->ep->hcpriv = NULL;
2158 3 : stream->ep = NULL;
2159 : }
2160 : }
2161 6 : return retval;
2162 : }
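
/* Sketch of the status decode above as a standalone function. The bit
 * positions are illustrative stand-ins for the SITD_STS_* masks, not the
 * hardware-defined values; the errno mapping matches the code: buffer
 * errors become -ENOSR (IN) or -ECOMM (OUT), babble -EOVERFLOW, and the
 * remaining error bits -EPROTO.
 */
#include <errno.h>
#include <stdbool.h>

#define STS_DBE		(1 << 5)	/* data buffer error (stand-in) */
#define STS_BABBLE	(1 << 4)
#define STS_XACT	(1 << 3)
#define STS_MMF		(1 << 2)
#define STS_ERRS	(STS_DBE | STS_BABBLE | STS_XACT | STS_MMF)

static int decode_sitd_status(unsigned t, bool is_in)
{
	if (!(t & STS_ERRS))
		return 0;
	if (t & STS_DBE)
		return is_in ? -ENOSR : -ECOMM;	/* hc couldn't read/write */
	if (t & STS_BABBLE)
		return -EOVERFLOW;
	return -EPROTO;				/* XACT, MMF, ... */
}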
2163 :
2164 :
2165 : static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
2166 : gfp_t mem_flags)
2167 1 : {
2168 2 : int status = -EINVAL;
2169 1 : unsigned long flags;
2170 1 : struct ehci_iso_stream *stream;
2171 1 :
2172 1 : /* Get iso_stream head */
2173 3 : stream = iso_stream_find (ehci, urb);
2174 3 : if (stream == NULL) {
2175 1 : ehci_dbg (ehci, "can't get iso stream\n");
2176 2 : return -ENOMEM;
2177 : }
2178 3 : if (urb->interval != stream->interval) {
2179 : ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
2180 : stream->interval, urb->interval);
2181 1 : goto done;
2182 : }
2183 :
2184 : #ifdef EHCI_URB_TRACE
2185 : ehci_dbg (ehci,
2186 : "submit %p dev%s ep%d%s-iso len %d\n",
2187 : urb, urb->dev->devpath,
2188 : usb_pipeendpoint (urb->pipe),
2189 : usb_pipein (urb->pipe) ? "in" : "out",
2190 : urb->transfer_buffer_length);
2191 : #endif
2192 :
2193 : /* allocate SITDs */
2194 4 : status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
2195 2 : if (status < 0) {
2196 : ehci_dbg (ehci, "can't init sitds\n");
2197 1 : goto done;
2198 : }
2199 :
2200 : /* schedule ... need to lock */
2201 3 : spin_lock_irqsave (&ehci->lock, flags);
2202 8 : if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
2203 : &ehci_to_hcd(ehci)->flags))) {
2204 1 : status = -ESHUTDOWN;
2205 1 : goto done_not_linked;
2206 : }
2207 3 : status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
2208 4 : if (unlikely(status))
2209 1 : goto done_not_linked;
2210 5 : status = iso_stream_schedule(ehci, urb, stream);
2211 2 : if (status == 0)
2212 2 : sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
2213 : else
2214 3 : usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
2215 : done_not_linked:
2216 8 : spin_unlock_irqrestore (&ehci->lock, flags);
2217 2 :
2218 1 : done:
2219 6 : if (status < 0)
2220 12 : iso_stream_put (ehci, stream);
2221 4 : return status;
2222 : }
2223 :
2224 : /*-------------------------------------------------------------------------*/
2225 :
2226 : static void free_cached_lists(struct ehci_hcd *ehci)
2227 : {
2228 18 : struct ehci_itd *itd, *n;
2229 18 : struct ehci_sitd *sitd, *sn;
2230 18 :
2231 198 : list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) {
2232 54 : struct ehci_iso_stream *stream = itd->stream;
2233 72 : itd->stream = NULL;
2234 54 : list_move(&itd->itd_list, &stream->free_list);
2235 90 : iso_stream_put(ehci, stream);
2236 18 : }
2237 18 :
2238 198 : list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) {
2239 54 : struct ehci_iso_stream *stream = sitd->stream;
2240 54 : sitd->stream = NULL;
2241 36 : list_move(&sitd->sitd_list, &stream->free_list);
2242 72 : iso_stream_put(ehci, stream);
2243 : }
2244 : }
2245 18 :
2246 : /*-------------------------------------------------------------------------*/
2247 :
2248 : static void
2249 : scan_periodic (struct ehci_hcd *ehci)
2250 : {
2251 3 : unsigned now_uframe, frame, clock, clock_frame, mod;
2252 3 : unsigned modified;
2253 3 :
2254 6 : mod = ehci->periodic_size << 3;
2255 3 :
2256 3 : /*
2257 3 : * When running, scan from last scan point up to "now"
2258 3 : * else clean up by scanning everything that's left.
2259 3 : * Touches as few pages as possible: cache-friendly.
2260 3 : */
2261 9 : now_uframe = ehci->next_uframe;
2262 15 : if (HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
2263 9 : clock = ehci_readl(ehci, &ehci->regs->frame_index);
2264 6 : clock_frame = (clock >> 3) % ehci->periodic_size;
2265 3 : } else {
2266 6 : clock = now_uframe + mod - 1;
2267 6 : clock_frame = -1;
2268 3 : }
2269 15 : if (ehci->clock_frame != clock_frame) {
2270 15 : free_cached_lists(ehci);
2271 6 : ehci->clock_frame = clock_frame;
2272 3 : }
2273 12 : clock %= mod;
2274 12 : clock_frame = clock >> 3;
2275 12 :
2276 3 : for (;;) {
2277 3 : union ehci_shadow q, *q_p;
2278 3 : __hc32 type, *hw_p;
2279 6 : unsigned incomplete = false;
2280 3 :
2281 6 : frame = now_uframe >> 3;
2282 6 :
2283 3 : restart:
2284 3 : /* scan each element in frame's queue for completions */
2285 6 : q_p = &ehci->pshadow [frame];
2286 6 : hw_p = &ehci->periodic [frame];
2287 3 : q.ptr = q_p->ptr;
2288 9 : type = Q_NEXT_TYPE(ehci, *hw_p);
2289 3 : modified = 0;
2290 :
2291 9 : while (q.ptr != NULL) {
2292 3 : unsigned uf;
2293 3 : union ehci_shadow temp;
2294 : int live;
2295 :
2296 9 : live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state);
2297 9 : switch (hc32_to_cpu(ehci, type)) {
2298 9 : case Q_TYPE_QH:
2299 : /* handle any completions */
2300 6 : temp.qh = qh_get (q.qh);
2301 9 : type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next);
2302 3 : q = q.qh->qh_next;
2303 15 : modified = qh_completions (ehci, temp.qh);
2304 33 : if (unlikely(list_empty(&temp.qh->qtd_list) ||
2305 : temp.qh->needs_rescan))
2306 36 : intr_deschedule (ehci, temp.qh);
2307 27 : qh_put (temp.qh);
2308 3 : break;
2309 12 : case Q_TYPE_FSTN:
2310 : /* for "save place" FSTNs, look at QH entries
2311 : * in the previous frame for completions.
2312 : */
2313 6 : if (q.fstn->hw_prev != EHCI_LIST_END(ehci)) {
2314 : dbg ("ignoring completions from FSTNs");
2315 : }
2316 9 : type = Q_NEXT_TYPE(ehci, q.fstn->hw_next);
2317 3 : q = q.fstn->fstn_next;
2318 3 : break;
2319 12 : case Q_TYPE_ITD:
2320 : /* If this ITD is still active, leave it for
2321 : * later processing ... check the next entry.
2322 : * No need to check for activity unless the
2323 : * frame is current.
2324 : */
2325 12 : if (frame == clock_frame && live) {
2326 3 : rmb();
2327 15 : for (uf = 0; uf < 8; uf++) {
2328 18 : if (q.itd->hw_transaction[uf] &
2329 3 : ITD_ACTIVE(ehci))
2330 3 : break;
2331 : }
2332 12 : if (uf < 8) {
2333 9 : incomplete = true;
2334 6 : q_p = &q.itd->itd_next;
2335 6 : hw_p = &q.itd->hw_next;
2336 15 : type = Q_NEXT_TYPE(ehci,
2337 : q.itd->hw_next);
2338 3 : q = *q_p;
2339 3 : break;
2340 : }
2341 : }
2342 :
2343 : /* Take finished ITDs out of the schedule
2344 : * and process them: recycle, maybe report
2345 : * URB completion. HC won't cache the
2346 : * pointer for much longer, if at all.
2347 : */
2348 9 : *q_p = q.itd->itd_next;
2349 9 : *hw_p = q.itd->hw_next;
2350 21 : type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
2351 3 : wmb();
2352 9 : modified = itd_complete (ehci, q.itd);
2353 3 : q = *q_p;
2354 3 : break;
2355 12 : case Q_TYPE_SITD:
2356 : /* If this SITD is still active, leave it for
2357 : * later processing ... check the next entry.
2358 : * No need to check for activity unless the
2359 : * frame is current.
2360 : */
2361 24 : if (frame == clock_frame && live &&
2362 : (q.sitd->hw_results &
2363 : SITD_ACTIVE(ehci))) {
2364 3 : incomplete = true;
2365 3 : q_p = &q.sitd->sitd_next;
2366 3 : hw_p = &q.sitd->hw_next;
2367 9 : type = Q_NEXT_TYPE(ehci,
2368 : q.sitd->hw_next);
2369 3 : q = *q_p;
2370 3 : break;
2371 : }
2372 :
2373 : /* Take finished SITDs out of the schedule
2374 : * and process them: recycle, maybe report
2375 : * URB completion.
2376 : */
2377 6 : *q_p = q.sitd->sitd_next;
2378 6 : *hw_p = q.sitd->hw_next;
2379 15 : type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
2380 3 : wmb();
2381 9 : modified = sitd_complete (ehci, q.sitd);
2382 3 : q = *q_p;
2383 3 : break;
2384 6 : default:
2385 3 : dbg ("corrupt type %d frame %d shadow %p",
2386 : type, frame, q.ptr);
2387 : // BUG ();
2388 3 : q.ptr = NULL;
2389 3 : }
2390 :
2391 3 : /* assume completion callbacks modify the queue */
2392 48 : if (unlikely (modified)) {
2393 12 : if (likely(ehci->periodic_sched > 0))
2394 3 : goto restart;
2395 : /* short-circuit this scan */
2396 3 : now_uframe = clock;
2397 3 : break;
2398 : }
2399 : }
2400 :
2401 3 : /* If we can tell we caught up to the hardware, stop now.
2402 : * We can't advance our scan without collecting the ISO
2403 : * transfers that are still pending in this frame.
2404 : */
2405 30 : if (incomplete && HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
2406 3 : ehci->next_uframe = now_uframe;
2407 3 : break;
2408 : }
2409 :
2410 : // FIXME: this assumes we won't get lapped when
2411 : // latencies climb; that should be rare, but...
2412 : // detect it, and just go all the way around.
2413 : // FLR (the frame-list rollover interrupt) might help detect this
2414 : // case, so long as latencies don't exceed periodic_size msec (default 1.024 sec).
2415 :
2416 : // FIXME: likewise assumes HC doesn't halt mid-scan
2417 :
2418 18 : if (now_uframe == clock) {
2419 : unsigned now;
2420 :
2421 30 : if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)
2422 : || ehci->periodic_sched == 0)
2423 3 : break;
2424 3 : ehci->next_uframe = now_uframe;
2425 9 : now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
2426 6 : if (now_uframe == now)
2427 3 : break;
2428 :
2429 : /* rescan the rest of this frame, then ... */
2430 3 : clock = now;
2431 3 : clock_frame = clock >> 3;
2432 6 : if (ehci->clock_frame != clock_frame) {
2433 6 : free_cached_lists(ehci);
2434 3 : ehci->clock_frame = clock_frame;
2435 : }
2436 : } else {
2437 9 : now_uframe++;
2438 9 : now_uframe %= mod;
2439 : }
2440 15 : }
2441 9 : }
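
/* scan_periodic() treats FRINDEX as a microframe counter: the low three
 * bits select the uframe and the remainder, modulo the schedule span,
 * selects the frame. A standalone check of that split for the default
 * 1024-frame schedule (the raw register value here is arbitrary):
 */
#include <assert.h>

int main(void)
{
	unsigned periodic_size = 1024;			/* frames */
	unsigned mod = periodic_size << 3;		/* 8192 uframes */
	unsigned frindex = 0x2345;			/* sample reading */
	unsigned clock = frindex % mod;
	unsigned frame = clock >> 3;
	unsigned uframe = clock & 7;

	assert(clock == 837 && frame == 104 && uframe == 5);
	return 0;
}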