Line data Source code
1 : /*
2 : * scsi_lib.c Copyright (C) 1999 Eric Youngdale
3 : *
4 : * SCSI queueing library.
5 : * Initial versions: Eric Youngdale (eric@andante.org).
6 : * Based upon conversations with large numbers
7 : * of people at Linux Expo.
8 : */
9 :
10 : #include <linux/bio.h>
11 : #include <linux/bitops.h>
12 : #include <linux/blkdev.h>
13 : #include <linux/completion.h>
14 : #include <linux/kernel.h>
15 : #include <linux/mempool.h>
16 : #include <linux/slab.h>
17 : #include <linux/init.h>
18 : #include <linux/pci.h>
19 : #include <linux/delay.h>
20 : #include <linux/hardirq.h>
21 : #include <linux/scatterlist.h>
22 :
23 : #include <scsi/scsi.h>
24 : #include <scsi/scsi_cmnd.h>
25 : #include <scsi/scsi_dbg.h>
26 : #include <scsi/scsi_device.h>
27 : #include <scsi/scsi_driver.h>
28 : #include <scsi/scsi_eh.h>
29 : #include <scsi/scsi_host.h>
30 :
31 : #include "scsi_priv.h"
32 : #include "scsi_logging.h"
33 :
34 :
35 : #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools)
36 : #define SG_MEMPOOL_SIZE 2
37 :
38 : struct scsi_host_sg_pool {
39 : size_t size;
40 : char *name;
41 : struct kmem_cache *slab;
42 : mempool_t *pool;
43 : };
44 :
45 : #define SP(x) { x, "sgpool-" __stringify(x) }
46 : #if (SCSI_MAX_SG_SEGMENTS < 32)
47 : #error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
48 : #endif
49 1 : static struct scsi_host_sg_pool scsi_sg_pools[] = {
50 : SP(8),
51 : SP(16),
52 : #if (SCSI_MAX_SG_SEGMENTS > 32)
53 : SP(32),
54 : #if (SCSI_MAX_SG_SEGMENTS > 64)
55 : SP(64),
56 : #if (SCSI_MAX_SG_SEGMENTS > 128)
57 : SP(128),
58 : #if (SCSI_MAX_SG_SEGMENTS > 256)
59 : #error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
60 : #endif
61 : #endif
62 : #endif
63 : #endif
64 : SP(SCSI_MAX_SG_SEGMENTS)
65 : };
66 : #undef SP
67 :
68 : struct kmem_cache *scsi_sdb_cache;
69 :
70 : static void scsi_run_queue(struct request_queue *q);
71 :
72 : /*
73 : * Function: scsi_unprep_request()
74 : *
75 : * Purpose: Remove all preparation done for a request, including its
76 : * associated scsi_cmnd, so that it can be requeued.
77 : *
78 : * Arguments: req - request to unprepare
79 : *
80 : * Lock status: Assumed that no locks are held upon entry.
81 : *
82 : * Returns: Nothing.
83 : */
84 : static void scsi_unprep_request(struct request *req)
85 : {
86 0 : struct scsi_cmnd *cmd = req->special;
87 :
88 0 : req->cmd_flags &= ~REQ_DONTPREP;
89 0 : req->special = NULL;
90 :
91 0 : scsi_put_command(cmd);
92 0 : }
93 :
94 : /**
95 : * __scsi_queue_insert - private queue insertion
96 : * @cmd: The SCSI command being requeued
97 : * @reason: The reason for the requeue
98 : * @unbusy: Whether the queue should be unbusied
99 : *
100 : * This is a private queue insertion. The public interface
101 : * scsi_queue_insert() always assumes the queue should be unbusied
102 : * because it's always called before the completion. This function is
103 : * for a requeue after completion, which should only occur in this
104 : * file.
105 : */
106 : static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
107 : {
108 0 : struct Scsi_Host *host = cmd->device->host;
109 0 : struct scsi_device *device = cmd->device;
110 0 : struct scsi_target *starget = scsi_target(device);
111 0 : struct request_queue *q = device->request_queue;
112 0 : unsigned long flags;
113 0 :
114 0 : SCSI_LOG_MLQUEUE(1,
115 : printk("Inserting command %p into mlqueue\n", cmd));
116 :
117 : /*
118 : * Set the appropriate busy bit for the device/host.
119 : *
120 : * If the host/device isn't busy, assume that something actually
121 : * completed, and that we should be able to queue a command now.
122 : *
123 : * Note that the prior mid-layer assumption that any host could
124 : * always queue at least one command is now broken. The mid-layer
125 : * will implement a user specifiable stall (see
126 : * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
127 : * if a command is requeued with no other commands outstanding
128 : * either for the device or for the host.
129 : */
130 0 : switch (reason) {
131 0 : case SCSI_MLQUEUE_HOST_BUSY:
132 0 : host->host_blocked = host->max_host_blocked;
133 0 : break;
134 0 : case SCSI_MLQUEUE_DEVICE_BUSY:
135 0 : device->device_blocked = device->max_device_blocked;
136 0 : break;
137 0 : case SCSI_MLQUEUE_TARGET_BUSY:
138 0 : starget->target_blocked = starget->max_target_blocked;
139 0 : break;
140 0 : }
141 :
142 0 : /*
143 : * Decrement the counters, since these commands are no longer
144 : * active on the host/device.
145 : */
146 0 : if (unbusy)
147 0 : scsi_device_unbusy(device);
148 :
149 : /*
150 : * Requeue this command. It will go before all other commands
151 : * that are already in the queue.
152 : *
153 : * NOTE: there is magic here about the way the queue is plugged if
154 : * we have no outstanding commands.
155 : *
156 : * Although we *don't* plug the queue, we call the request
157 : * function. The SCSI request function detects the blocked condition
158 : * and plugs the queue appropriately.
159 : */
160 0 : spin_lock_irqsave(q->queue_lock, flags);
161 0 : blk_requeue_request(q, cmd->request);
162 0 : spin_unlock_irqrestore(q->queue_lock, flags);
163 :
164 0 : scsi_run_queue(q);
165 :
166 0 : return 0;
167 : }
168 :
169 : /*
170 : * Function: scsi_queue_insert()
171 : *
172 : * Purpose: Insert a command in the midlevel queue.
173 : *
174 : * Arguments: cmd - command that we are adding to queue.
175 : * reason - why we are inserting command to queue.
176 : *
177 : * Lock status: Assumed that lock is not held upon entry.
178 : *
179 : * Returns: Nothing.
180 : *
181 : * Notes: We do this for one of two cases. Either the host is busy
182 : * and it cannot accept any more commands for the time being,
183 : * or the device returned QUEUE_FULL and can accept no more
184 : * commands.
185 : * Notes: This could be called either from an interrupt context or a
186 : * normal process context.
187 : */
188 : int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
189 : {
190 0 : return __scsi_queue_insert(cmd, reason, 1);
191 : }
192 : /**
193 : * scsi_execute - insert request and wait for the result
194 : * @sdev: scsi device
195 : * @cmd: scsi command
196 : * @data_direction: data direction
197 : * @buffer: data buffer
198 : * @bufflen: len of buffer
199 : * @sense: optional sense buffer
200 : * @timeout: request timeout in jiffies
201 : * @retries: number of times to retry request
202 : * @flags: flags to be ORed into the request's cmd_flags
203 : * @resid: optional residual length
204 : *
205 : * returns the req->errors value which is the scsi_cmnd result
206 : * field.
207 : */
208 : int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
209 : int data_direction, void *buffer, unsigned bufflen,
210 : unsigned char *sense, int timeout, int retries, int flags,
211 : int *resid)
212 144 : {
213 144 : struct request *req;
214 288 : int write = (data_direction == DMA_TO_DEVICE);
215 288 : int ret = DRIVER_ERROR << 24;
216 144 :
217 288 : req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
218 :
219 720 : if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
220 : buffer, bufflen, __GFP_WAIT))
221 144 : goto out;
222 :
223 144 : req->cmd_len = COMMAND_SIZE(cmd[0]);
224 432 : memcpy(req->cmd, cmd, req->cmd_len);
225 144 : req->sense = sense;
226 144 : req->sense_len = 0;
227 144 : req->retries = retries;
228 144 : req->timeout = timeout;
229 144 : req->cmd_type = REQ_TYPE_BLOCK_PC;
230 144 : req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
231 :
232 : /*
233 : * head injection *required* here otherwise quiesce won't work
234 : */
235 144 : blk_execute_rq(req->q, NULL, req, 1);
236 :
237 : /*
238 : * Some devices (USB mass-storage in particular) may transfer
239 : * garbage data together with a residue indicating that the data
240 : * is invalid. Prevent the garbage from being misinterpreted
241 : * and prevent security leaks by zeroing out the excess data.
242 : */
243 1152 : if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
244 288 : memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
245 :
246 576 : if (resid)
247 576 : *resid = req->resid_len;
248 288 : ret = req->errors;
249 288 : out:
250 432 : blk_put_request(req);
251 :
252 432 : return ret;
253 : }
254 : EXPORT_SYMBOL(scsi_execute);
255 :
256 :
257 : int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
258 : int data_direction, void *buffer, unsigned bufflen,
259 : struct scsi_sense_hdr *sshdr, int timeout, int retries,
260 : int *resid)
261 72 : {
262 144 : char *sense = NULL;
263 72 : int result;
264 :
265 144 : if (sshdr) {
266 216 : sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
267 144 : if (!sense)
268 72 : return DRIVER_ERROR << 24;
269 : }
270 576 : result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
271 : sense, timeout, retries, 0, resid);
272 144 : if (sshdr)
273 216 : scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
274 :
275 144 : kfree(sense);
276 144 : return result;
277 : }
278 : EXPORT_SYMBOL(scsi_execute_req);
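/*
 * Illustrative use of scsi_execute_req() (not part of this file): a
 * hypothetical caller issuing TEST UNIT READY and decoding the sense
 * data, roughly the pattern used by upper-level drivers such as sd/sr.
 *
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int res;
 *
 *	res = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
 *			       30 * HZ, 3, NULL);
 *	if (res && scsi_sense_valid(&sshdr))
 *		// inspect sshdr.sense_key / sshdr.asc / sshdr.ascq
 */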
279 :
280 : /*
281 : * Function: scsi_init_cmd_errh()
282 : *
283 : * Purpose: Initialize cmd fields related to error handling.
284 : *
285 : * Arguments: cmd - command that is ready to be queued.
286 : *
287 : * Notes: This function has the job of initializing a number of
288 : * fields related to error handling. Typically this will
289 : * be called once for each command, as required.
290 : */
291 : static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
292 : {
293 0 : cmd->serial_number = 0;
294 0 : scsi_set_resid(cmd, 0);
295 0 : memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
296 0 : if (cmd->cmd_len == 0)
297 0 : cmd->cmd_len = scsi_command_size(cmd->cmnd);
298 0 : }
299 :
300 : void scsi_device_unbusy(struct scsi_device *sdev)
301 : {
302 0 : struct Scsi_Host *shost = sdev->host;
303 0 : struct scsi_target *starget = scsi_target(sdev);
304 0 : unsigned long flags;
305 0 :
306 0 : spin_lock_irqsave(shost->host_lock, flags);
307 0 : shost->host_busy--;
308 0 : starget->target_busy--;
309 0 : if (unlikely(scsi_host_in_recovery(shost) &&
310 0 : (shost->host_failed || shost->host_eh_scheduled)))
311 0 : scsi_eh_wakeup(shost);
312 0 : spin_unlock(shost->host_lock);
313 0 : spin_lock(sdev->request_queue->queue_lock);
314 0 : sdev->device_busy--;
315 0 : spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
316 0 : }
317 :
318 : /*
319 : * Called for single_lun devices on IO completion. Clear starget_sdev_user,
320 : * and call blk_run_queue for all the scsi_devices on the target -
321 : * including current_sdev first.
322 : *
323 : * Called with *no* scsi locks held.
324 : */
325 : static void scsi_single_lun_run(struct scsi_device *current_sdev)
326 : {
327 6 : struct Scsi_Host *shost = current_sdev->host;
328 3 : struct scsi_device *sdev, *tmp;
329 12 : struct scsi_target *starget = scsi_target(current_sdev);
330 3 : unsigned long flags;
331 3 :
332 12 : spin_lock_irqsave(shost->host_lock, flags);
333 6 : starget->starget_sdev_user = NULL;
334 9 : spin_unlock_irqrestore(shost->host_lock, flags);
335 3 :
336 3 : /*
337 3 : * Call blk_run_queue for all LUNs on the target, starting with
338 3 : * current_sdev. We race with others (to set starget_sdev_user),
339 3 : * but in most cases, we will be first. Ideally, each LU on the
340 : * target would get some limited time or requests on the target.
341 : */
342 3 : blk_run_queue(current_sdev->request_queue);
343 :
344 9 : spin_lock_irqsave(shost->host_lock, flags);
345 9 : if (starget->starget_sdev_user)
346 3 : goto out;
347 48 : list_for_each_entry_safe(sdev, tmp, &starget->devices,
348 12 : same_target_siblings) {
349 9 : if (sdev == current_sdev)
350 3 : continue;
351 15 : if (scsi_device_get(sdev))
352 3 : continue;
353 :
354 9 : spin_unlock_irqrestore(shost->host_lock, flags);
355 3 : blk_run_queue(sdev->request_queue);
356 9 : spin_lock_irqsave(shost->host_lock, flags);
357 :
358 6 : scsi_device_put(sdev);
359 : }
360 3 : out:
361 12 : spin_unlock_irqrestore(shost->host_lock, flags);
362 3 : }
363 :
364 : static inline int scsi_device_is_busy(struct scsi_device *sdev)
365 : {
366 0 : if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
367 0 : return 1;
368 :
369 0 : return 0;
370 : }
371 :
372 : static inline int scsi_target_is_busy(struct scsi_target *starget)
373 : {
374 24 : return ((starget->can_queue > 0 &&
375 : starget->target_busy >= starget->can_queue) ||
376 : starget->target_blocked);
377 : }
378 :
379 : static inline int scsi_host_is_busy(struct Scsi_Host *shost)
380 : {
381 27 : if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
382 : shost->host_blocked || shost->host_self_blocked)
383 3 : return 1;
384 :
385 3 : return 0;
386 : }
387 :
388 : /*
389 : * Function: scsi_run_queue()
390 : *
391 : * Purpose: Select a proper request queue to serve next
392 : *
393 : * Arguments: q - last request's queue
394 : *
395 : * Returns: Nothing
396 : *
397 : * Notes: The previous command was completely finished, start
398 : * a new one if possible.
399 : */
400 : static void scsi_run_queue(struct request_queue *q)
401 : {
402 9 : struct scsi_device *sdev = q->queuedata;
403 3 : struct Scsi_Host *shost;
404 9 : LIST_HEAD(starved_list);
405 3 : unsigned long flags;
406 3 :
407 3 : /* if the device is dead, sdev will be NULL, so no queue to run */
408 9 : if (!sdev)
409 6 : return;
410 3 :
411 6 : shost = sdev->host;
412 15 : if (scsi_target(sdev)->single_lun)
413 9 : scsi_single_lun_run(sdev);
414 3 :
415 18 : spin_lock_irqsave(shost->host_lock, flags);
416 12 : list_splice_init(&shost->starved_list, &starved_list);
417 :
418 15 : while (!list_empty(&starved_list)) {
419 3 : int flagset;
420 3 :
421 : /*
422 : * As long as shost is accepting commands and we have
423 : * starved queues, call blk_run_queue. scsi_request_fn
424 3 : * drops the queue_lock and can add us back to the
425 : * starved_list.
426 : *
427 : * host_lock protects the starved_list and starved_entry.
428 : * scsi_request_fn must get the host_lock before checking
429 : * or modifying starved_list or starved_entry.
430 : */
431 12 : if (scsi_host_is_busy(shost))
432 3 : break;
433 :
434 6 : sdev = list_entry(starved_list.next,
435 : struct scsi_device, starved_entry);
436 6 : list_del_init(&sdev->starved_entry);
437 18 : if (scsi_target_is_busy(scsi_target(sdev))) {
438 6 : list_move_tail(&sdev->starved_entry,
439 : &shost->starved_list);
440 3 : continue;
441 : }
442 :
443 6 : spin_unlock(shost->host_lock);
444 :
445 6 : spin_lock(sdev->request_queue->queue_lock);
446 39 : flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
447 : !test_bit(QUEUE_FLAG_REENTER,
448 : &sdev->request_queue->queue_flags);
449 12 : if (flagset)
450 12 : queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
451 9 : __blk_run_queue(sdev->request_queue);
452 18 : if (flagset)
453 18 : queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
454 24 : spin_unlock(sdev->request_queue->queue_lock);
455 :
456 6 : spin_lock(shost->host_lock);
457 : }
458 3 : /* put any unprocessed entries back */
459 18 : list_splice(&starved_list, &shost->starved_list);
460 6 : spin_unlock_irqrestore(shost->host_lock, flags);
461 :
462 3 : blk_run_queue(q);
463 3 : }
464 :
465 : /*
466 : * Function: scsi_requeue_command()
467 : *
468 : * Purpose: Handle post-processing of completed commands.
469 : *
470 : * Arguments: q - queue to operate on
471 : * cmd - command that may need to be requeued.
472 : *
473 : * Returns: Nothing
474 : *
475 : * Notes: After command completion, there may be blocks left
476 : * over which weren't finished by the previous command;
477 : * this can be for a number of reasons - the main one is
478 : * I/O errors in the middle of the request, in which case
479 : * we need to request the blocks that come after the bad
480 : * sector.
481 : * Notes: Upon return, cmd is a stale pointer.
482 : */
483 : static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
484 : {
485 0 : struct request *req = cmd->request;
486 0 : unsigned long flags;
487 0 :
488 0 : spin_lock_irqsave(q->queue_lock, flags);
489 0 : scsi_unprep_request(req);
490 0 : blk_requeue_request(q, req);
491 0 : spin_unlock_irqrestore(q->queue_lock, flags);
492 :
493 0 : scsi_run_queue(q);
494 0 : }
495 :
496 : void scsi_next_command(struct scsi_cmnd *cmd)
497 : {
498 0 : struct scsi_device *sdev = cmd->device;
499 0 : struct request_queue *q = sdev->request_queue;
500 :
501 : /* need to hold a reference on the device before we let go of the cmd */
502 0 : get_device(&sdev->sdev_gendev);
503 :
504 0 : scsi_put_command(cmd);
505 0 : scsi_run_queue(q);
506 :
507 : /* ok to remove device now */
508 0 : put_device(&sdev->sdev_gendev);
509 0 : }
510 :
511 : void scsi_run_host_queues(struct Scsi_Host *shost)
512 : {
513 0 : struct scsi_device *sdev;
514 :
515 0 : shost_for_each_device(sdev, shost)
516 0 : scsi_run_queue(sdev->request_queue);
517 0 : }
518 :
519 : static void __scsi_release_buffers(struct scsi_cmnd *, int);
520 :
521 : /*
522 0 : * Function: scsi_end_request()
523 : *
524 : * Purpose: Post-processing of completed commands (usually invoked at end
525 : * of upper level post-processing and scsi_io_completion).
526 : *
527 : * Arguments: cmd - command that is complete.
528 : * error - 0 if I/O indicates success, < 0 for I/O error.
529 : * bytes - number of bytes of completed I/O
530 : * requeue - indicates whether we should requeue leftovers.
531 : *
532 : * Lock status: Assumed that lock is not held upon entry.
533 : *
534 : * Returns: cmd if requeue required, NULL otherwise.
535 : *
536 : * Notes: This is called for block device requests in order to
537 : * mark some number of sectors as complete.
538 : *
539 : * We are guaranteeing that the request queue will be goosed
540 : * at some point during this call.
541 : * Notes: If cmd was requeued, upon return it will be a stale pointer.
542 : */
543 : static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
544 : int bytes, int requeue)
545 : {
546 0 : struct request_queue *q = cmd->device->request_queue;
547 0 : struct request *req = cmd->request;
548 0 :
549 0 : /*
550 : * If there are blocks left over at the end, set up the command
551 : * to queue the remainder of them.
552 : */
553 0 : if (blk_end_request(req, error, bytes)) {
554 : /* kill remainder if no retrys */
555 0 : if (error && scsi_noretry_cmd(cmd))
556 0 : blk_end_request_all(req, error);
557 : else {
558 0 : if (requeue) {
559 : /*
560 : * Bleah. Leftovers again. Stick the
561 : * leftovers in the front of the
562 : * queue, and goose the queue again.
563 : */
564 0 : scsi_release_buffers(cmd);
565 0 : scsi_requeue_command(q, cmd);
566 0 : cmd = NULL;
567 : }
568 0 : return cmd;
569 : }
570 : }
571 :
572 : /*
573 : * This will goose the queue request function at the end, so we don't
574 : * need to worry about launching another command.
575 : */
576 0 : __scsi_release_buffers(cmd, 0);
577 0 : scsi_next_command(cmd);
578 0 : return NULL;
579 : }
580 :
581 : static inline unsigned int scsi_sgtable_index(unsigned short nents)
582 : {
583 0 : unsigned int index;
584 0 :
585 0 : BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);
586 :
587 0 : if (nents <= 8)
588 0 : index = 0;
589 : else
590 0 : index = get_count_order(nents) - 3;
591 :
592 0 : return index;
593 : }
594 :
595 : static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
596 : {
597 0 : struct scsi_host_sg_pool *sgp;
598 0 :
599 0 : sgp = scsi_sg_pools + scsi_sgtable_index(nents);
600 0 : mempool_free(sgl, sgp->pool);
601 0 : }
602 :
603 : static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
604 : {
605 0 : struct scsi_host_sg_pool *sgp;
606 0 :
607 0 : sgp = scsi_sg_pools + scsi_sgtable_index(nents);
608 0 : return mempool_alloc(sgp->pool, gfp_mask);
609 : }
610 :
611 : static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
612 : gfp_t gfp_mask)
613 0 : {
614 0 : int ret;
615 0 :
616 0 : BUG_ON(!nents);
617 :
618 0 : ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
619 : gfp_mask, scsi_sg_alloc);
620 0 : if (unlikely(ret))
621 0 : __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
622 : scsi_sg_free);
623 :
624 0 : return ret;
625 : }
626 :
627 : static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
628 : {
629 0 : __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
630 0 : }
631 :
632 : static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
633 : {
634 0 :
635 0 : if (cmd->sdb.table.nents)
636 0 : scsi_free_sgtable(&cmd->sdb);
637 :
638 0 : memset(&cmd->sdb, 0, sizeof(cmd->sdb));
639 :
640 0 : if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
641 0 : struct scsi_data_buffer *bidi_sdb =
642 : cmd->request->next_rq->special;
643 0 : scsi_free_sgtable(bidi_sdb);
644 0 : kmem_cache_free(scsi_sdb_cache, bidi_sdb);
645 0 : cmd->request->next_rq->special = NULL;
646 : }
647 :
648 0 : if (scsi_prot_sg_count(cmd))
649 0 : scsi_free_sgtable(cmd->prot_sdb);
650 0 : }
651 :
652 : /*
653 : * Function: scsi_release_buffers()
654 : *
655 : * Purpose: Completion processing for block device I/O requests.
656 : *
657 : * Arguments: cmd - command that we are bailing.
658 : *
659 : * Lock status: Assumed that no lock is held upon entry.
660 : *
661 : * Returns: Nothing
662 : *
663 : * Notes: In the event that an upper level driver rejects a
664 : * command, we must release resources allocated during
665 : * the __init_io() function. Primarily this would involve
666 : * the scatter-gather table, and potentially any bounce
667 : * buffers.
668 : */
669 : void scsi_release_buffers(struct scsi_cmnd *cmd)
670 : {
671 0 : __scsi_release_buffers(cmd, 1);
672 0 : }
673 : EXPORT_SYMBOL(scsi_release_buffers);
674 :
675 : /*
676 : * Function: scsi_io_completion()
677 : *
678 : * Purpose: Completion processing for block device I/O requests.
679 : *
680 : * Arguments: cmd - command that is finished.
681 : *
682 : * Lock status: Assumed that no lock is held upon entry.
683 : *
684 : * Returns: Nothing
685 : *
686 : * Notes: This function is matched in terms of capabilities to
687 : * the function that created the scatter-gather list.
688 : * In other words, if there are no bounce buffers
689 : * (the normal case for most drivers), we don't need
690 : * the logic to deal with cleaning up afterwards.
691 : *
692 : * We must call scsi_end_request(). This will finish off
693 : * the specified number of sectors. If we are done, the
694 : * command block will be released and the queue function
695 : * will be goosed. If we are not done then we have to
696 : * figure out what to do next:
697 : *
698 : * a) We can call scsi_requeue_command(). The request
699 : * will be unprepared and put back on the queue. Then
700 : * a new command will be created for it. This should
701 : * be used if we made forward progress, or if we want
702 : * to switch from READ(10) to READ(6) for example.
703 : *
704 : * b) We can call scsi_queue_insert(). The request will
705 : * be put back on the queue and retried using the same
706 : * command as before, possibly after a delay.
707 : *
708 : * c) We can call blk_end_request() with -EIO to fail
709 : * the remainder of the request.
710 : */
711 : void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
712 : {
713 0 : int result = cmd->result;
714 0 : struct request_queue *q = cmd->device->request_queue;
715 0 : struct request *req = cmd->request;
716 0 : int error = 0;
717 0 : struct scsi_sense_hdr sshdr;
718 0 : int sense_valid = 0;
719 0 : int sense_deferred = 0;
720 1 : enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
721 0 : ACTION_DELAYED_RETRY} action;
722 0 : char *description = NULL;
723 0 :
724 0 : if (result) {
725 0 : sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
726 0 : if (sense_valid)
727 0 : sense_deferred = scsi_sense_is_deferred(&sshdr);
728 0 : }
729 0 :
730 0 : if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
731 0 : req->errors = result;
732 0 : if (result) {
733 0 : if (sense_valid && req->sense) {
734 : /*
735 : * SG_IO wants current and deferred errors
736 : */
737 0 : int len = 8 + cmd->sense_buffer[7];
738 :
739 0 : if (len > SCSI_SENSE_BUFFERSIZE)
740 0 : len = SCSI_SENSE_BUFFERSIZE;
741 0 : memcpy(req->sense, cmd->sense_buffer, len);
742 0 : req->sense_len = len;
743 : }
744 0 : if (!sense_deferred)
745 0 : error = -EIO;
746 : }
747 :
748 0 : req->resid_len = scsi_get_resid(cmd);
749 :
750 0 : if (scsi_bidi_cmnd(cmd)) {
751 : /*
752 : * Bidi commands must be completed as a whole,
753 : * both sides at once.
754 : */
755 0 : req->next_rq->resid_len = scsi_in(cmd)->resid;
756 :
757 0 : scsi_release_buffers(cmd);
758 0 : blk_end_request_all(req, 0);
759 :
760 0 : scsi_next_command(cmd);
761 0 : return;
762 : }
763 : }
764 :
765 0 : BUG_ON(blk_bidi_rq(req)); /* bidi not supported for !blk_pc_request yet */
766 :
767 : /*
768 : * Next deal with any sectors which we were able to correctly
769 : * handle.
770 : */
771 : SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
772 : "%d bytes done.\n",
773 : blk_rq_sectors(req), good_bytes));
774 :
775 : /*
776 : * Recovered errors need reporting, but they're always treated
777 : * as success, so fiddle the result code here. For BLOCK_PC
778 : * we already took a copy of the original into rq->errors which
779 : * is what gets returned to the user
780 : */
781 0 : if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
782 : /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
783 : * print since caller wants ATA registers. Only occurs on
784 : * SCSI ATA PASS_THROUGH commands when CK_COND=1
785 : */
786 0 : if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
787 : ;
788 0 : else if (!(req->cmd_flags & REQ_QUIET))
789 0 : scsi_print_sense("", cmd);
790 0 : result = 0;
791 : /* BLOCK_PC may have set error */
792 0 : error = 0;
793 : }
794 :
795 : /*
796 : * A number of bytes were successfully read. If there
797 : * are leftovers and there is some kind of error
798 : * (result != 0), retry the rest.
799 : */
800 0 : if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
801 0 : return;
802 :
803 0 : error = -EIO;
804 :
805 0 : if (host_byte(result) == DID_RESET) {
806 : /* Third party bus reset or reset for error recovery
807 : * reasons. Just retry the command and see what
808 : * happens.
809 : */
810 0 : action = ACTION_RETRY;
811 0 : } else if (sense_valid && !sense_deferred) {
812 : switch (sshdr.sense_key) {
813 0 : case UNIT_ATTENTION:
814 0 : if (cmd->device->removable) {
815 : /* Detected disc change. Set a bit
816 : * and quietly refuse further access.
817 : */
818 0 : cmd->device->changed = 1;
819 0 : description = "Media Changed";
820 0 : action = ACTION_FAIL;
821 : } else {
822 : /* Must have been a power glitch, or a
823 : * bus reset. Could not have been a
824 : * media change, so we just retry the
825 : * command and see what happens.
826 : */
827 0 : action = ACTION_RETRY;
828 : }
829 0 : break;
830 0 : case ILLEGAL_REQUEST:
831 : /* If we had an ILLEGAL REQUEST returned, then
832 : * we may have performed an unsupported
833 : * command. The only thing this should be
834 : * would be a ten byte read where only a six
835 : * byte read was supported. Also, on a system
836 : * where READ CAPACITY failed, we may have
837 : * read past the end of the disk.
838 : */
839 0 : if ((cmd->device->use_10_for_rw &&
840 : sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
841 : (cmd->cmnd[0] == READ_10 ||
842 : cmd->cmnd[0] == WRITE_10)) {
843 : /* This will issue a new 6-byte command. */
844 0 : cmd->device->use_10_for_rw = 0;
845 0 : action = ACTION_REPREP;
846 0 : } else if (sshdr.asc == 0x10) /* DIX */ {
847 0 : description = "Host Data Integrity Failure";
848 0 : action = ACTION_FAIL;
849 0 : error = -EILSEQ;
850 : } else
851 0 : action = ACTION_FAIL;
852 0 : break;
853 0 : case ABORTED_COMMAND:
854 0 : action = ACTION_FAIL;
855 0 : if (sshdr.asc == 0x10) { /* DIF */
856 0 : description = "Target Data Integrity Failure";
857 0 : error = -EILSEQ;
858 : }
859 0 : break;
860 0 : case NOT_READY:
861 : /* If the device is in the process of becoming
862 : * ready, or has a temporary blockage, retry.
863 : */
864 0 : if (sshdr.asc == 0x04) {
865 : switch (sshdr.ascq) {
866 0 : case 0x01: /* becoming ready */
867 0 : case 0x04: /* format in progress */
868 0 : case 0x05: /* rebuild in progress */
869 0 : case 0x06: /* recalculation in progress */
870 0 : case 0x07: /* operation in progress */
871 0 : case 0x08: /* Long write in progress */
872 0 : case 0x09: /* self test in progress */
873 0 : case 0x14: /* space allocation in progress */
874 0 : action = ACTION_DELAYED_RETRY;
875 0 : break;
876 0 : default:
877 0 : description = "Device not ready";
878 0 : action = ACTION_FAIL;
879 0 : break;
880 0 : }
881 0 : } else {
882 0 : description = "Device not ready";
883 0 : action = ACTION_FAIL;
884 0 : }
885 0 : break;
886 0 : case VOLUME_OVERFLOW:
887 : /* See SSC3rXX or current. */
888 0 : action = ACTION_FAIL;
889 0 : break;
890 0 : default:
891 0 : description = "Unhandled sense code";
892 0 : action = ACTION_FAIL;
893 0 : break;
894 : }
895 : } else {
896 0 : description = "Unhandled error code";
897 0 : action = ACTION_FAIL;
898 : }
899 :
900 0 : switch (action) {
901 0 : case ACTION_FAIL:
902 : /* Give up and fail the remainder of the request */
903 0 : scsi_release_buffers(cmd);
904 0 : if (!(req->cmd_flags & REQ_QUIET)) {
905 0 : if (description)
906 0 : scmd_printk(KERN_INFO, cmd, "%s\n",
907 : description);
908 0 : scsi_print_result(cmd);
909 0 : if (driver_byte(result) & DRIVER_SENSE)
910 0 : scsi_print_sense("", cmd);
911 0 : scsi_print_command(cmd);
912 : }
913 0 : if (blk_end_request_err(req, error))
914 0 : scsi_requeue_command(q, cmd);
915 : else
916 0 : scsi_next_command(cmd);
917 0 : break;
918 0 : case ACTION_REPREP:
919 : /* Unprep the request and put it back at the head of the queue.
920 : * A new command will be prepared and issued.
921 : */
922 0 : scsi_release_buffers(cmd);
923 0 : scsi_requeue_command(q, cmd);
924 0 : break;
925 0 : case ACTION_RETRY:
926 : /* Retry the same command immediately */
927 0 : __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
928 0 : break;
929 0 : case ACTION_DELAYED_RETRY:
930 : /* Retry the same command after a delay */
931 0 : __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
932 0 : break;
933 0 : }
934 : }
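/*
 * The switch above maps the chosen action back onto the options listed
 * in the comment before scsi_io_completion(): ACTION_REPREP requeues
 * via scsi_requeue_command() (option a), ACTION_RETRY and
 * ACTION_DELAYED_RETRY reinsert the same command with
 * __scsi_queue_insert() (option b), and ACTION_FAIL ends the remainder
 * of the request with blk_end_request_err() (option c).
 */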
935 0 :
936 : static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
937 : gfp_t gfp_mask)
938 : {
939 0 : int count;
940 0 :
941 0 : /*
942 0 : * If sg table allocation fails, requeue request later.
943 : */
944 0 : if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
945 : gfp_mask))) {
946 0 : return BLKPREP_DEFER;
947 : }
948 :
949 0 : req->buffer = NULL;
950 :
951 : /*
952 : * Next, walk the list, and fill in the addresses and sizes of
953 : * each segment.
954 : */
955 0 : count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
956 0 : BUG_ON(count > sdb->table.nents);
957 0 : sdb->table.nents = count;
958 0 : sdb->length = blk_rq_bytes(req);
959 0 : return BLKPREP_OK;
960 : }
961 :
962 : /*
963 : * Function: scsi_init_io()
964 : *
965 : * Purpose: SCSI I/O initialize function.
966 : *
967 : * Arguments: cmd - Command descriptor we wish to initialize
968 : *
969 : * Returns: 0 on success
970 : * BLKPREP_DEFER if the failure is retryable
971 : * BLKPREP_KILL if the failure is fatal
972 : */
973 : int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
974 : {
975 0 : int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
976 0 : if (error)
977 0 : goto err_exit;
978 0 :
979 0 : if (blk_bidi_rq(cmd->request)) {
980 0 : struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
981 : scsi_sdb_cache, GFP_ATOMIC);
982 0 : if (!bidi_sdb) {
983 0 : error = BLKPREP_DEFER;
984 0 : goto err_exit;
985 : }
986 :
987 0 : cmd->request->next_rq->special = bidi_sdb;
988 0 : error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
989 : GFP_ATOMIC);
990 0 : if (error)
991 0 : goto err_exit;
992 : }
993 :
994 : if (blk_integrity_rq(cmd->request)) {
995 : struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
996 : int ivecs, count;
997 :
998 : BUG_ON(prot_sdb == NULL);
999 : ivecs = blk_rq_count_integrity_sg(cmd->request);
1000 :
1001 : if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
1002 : error = BLKPREP_DEFER;
1003 : goto err_exit;
1004 : }
1005 :
1006 : count = blk_rq_map_integrity_sg(cmd->request,
1007 : prot_sdb->table.sgl);
1008 : BUG_ON(unlikely(count > ivecs));
1009 :
1010 : cmd->prot_sdb = prot_sdb;
1011 : cmd->prot_sdb->table.nents = count;
1012 : }
1013 :
1014 0 : return BLKPREP_OK;
1015 0 :
1016 : err_exit:
1017 0 : scsi_release_buffers(cmd);
1018 0 : if (error == BLKPREP_KILL)
1019 0 : scsi_put_command(cmd);
1020 : else /* BLKPREP_DEFER */
1021 0 : scsi_unprep_request(cmd->request);
1022 :
1023 0 : return error;
1024 : }
1025 : EXPORT_SYMBOL(scsi_init_io);
1026 :
1027 : static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
1028 : struct request *req)
1029 0 : {
1030 0 : struct scsi_cmnd *cmd;
1031 :
1032 0 : if (!req->special) {
1033 0 : cmd = scsi_get_command(sdev, GFP_ATOMIC);
1034 0 : if (unlikely(!cmd))
1035 0 : return NULL;
1036 0 : req->special = cmd;
1037 : } else {
1038 0 : cmd = req->special;
1039 : }
1040 :
1041 : /* pull a tag out of the request if we have one */
1042 0 : cmd->tag = req->tag;
1043 0 : cmd->request = req;
1044 :
1045 0 : cmd->cmnd = req->cmd;
1046 :
1047 0 : return cmd;
1048 : }
1049 :
1050 : int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
1051 : {
1052 0 : struct scsi_cmnd *cmd;
1053 0 : int ret = scsi_prep_state_check(sdev, req);
1054 0 :
1055 0 : if (ret != BLKPREP_OK)
1056 0 : return ret;
1057 0 :
1058 0 : cmd = scsi_get_cmd_from_req(sdev, req);
1059 0 : if (unlikely(!cmd))
1060 0 : return BLKPREP_DEFER;
1061 0 :
1062 : /*
1063 : * BLOCK_PC requests may transfer data, in which case they must
1064 : * have a bio attached to them. Or they might contain a SCSI command
1065 : * that does not transfer data, in which case they may optionally
1066 : * submit a request without an attached bio.
1067 : */
1068 0 : if (req->bio) {
1069 : int ret;
1070 :
1071 0 : BUG_ON(!req->nr_phys_segments);
1072 :
1073 0 : ret = scsi_init_io(cmd, GFP_ATOMIC);
1074 0 : if (unlikely(ret))
1075 0 : return ret;
1076 : } else {
1077 0 : BUG_ON(blk_rq_bytes(req));
1078 :
1079 0 : memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1080 0 : req->buffer = NULL;
1081 : }
1082 :
1083 0 : cmd->cmd_len = req->cmd_len;
1084 0 : if (!blk_rq_bytes(req))
1085 0 : cmd->sc_data_direction = DMA_NONE;
1086 0 : else if (rq_data_dir(req) == WRITE)
1087 0 : cmd->sc_data_direction = DMA_TO_DEVICE;
1088 : else
1089 0 : cmd->sc_data_direction = DMA_FROM_DEVICE;
1090 :
1091 0 : cmd->transfersize = blk_rq_bytes(req);
1092 0 : cmd->allowed = req->retries;
1093 0 : return BLKPREP_OK;
1094 : }
1095 : EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
1096 :
1097 : /*
1098 : * Setup a REQ_TYPE_FS command. These are simple read/write requests
1099 : * from filesystems that still need to be translated to SCSI CDBs from
1100 : * the ULD.
1101 : */
1102 : int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
1103 : {
1104 0 : struct scsi_cmnd *cmd;
1105 0 : int ret = scsi_prep_state_check(sdev, req);
1106 0 :
1107 0 : if (ret != BLKPREP_OK)
1108 0 : return ret;
1109 0 :
1110 0 : if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
1111 0 : && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
1112 0 : ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
1113 0 : if (ret != BLKPREP_OK)
1114 0 : return ret;
1115 : }
1116 :
1117 : /*
1118 : * Filesystem requests must transfer data.
1119 : */
1120 0 : BUG_ON(!req->nr_phys_segments);
1121 :
1122 0 : cmd = scsi_get_cmd_from_req(sdev, req);
1123 0 : if (unlikely(!cmd))
1124 0 : return BLKPREP_DEFER;
1125 :
1126 0 : memset(cmd->cmnd, 0, BLK_MAX_CDB);
1127 0 : return scsi_init_io(cmd, GFP_ATOMIC);
1128 : }
1129 : EXPORT_SYMBOL(scsi_setup_fs_cmnd);
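/*
 * Illustrative (hypothetical) use of scsi_setup_fs_cmnd() from an
 * upper-level driver's prep_fn, along the lines of what sd does: let
 * the midlayer allocate the command and map the data first, then fill
 * in the CDB for the REQ_TYPE_FS request.  Such a prep_fn is installed
 * with blk_queue_prep_rq(), as scsi_alloc_queue() below does for the
 * default scsi_prep_fn().
 *
 *	static int my_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdp = q->queuedata;
 *		int ret = scsi_setup_fs_cmnd(sdp, rq);
 *
 *		if (ret != BLKPREP_OK)
 *			return ret;
 *		// build the READ/WRITE CDB in
 *		// ((struct scsi_cmnd *)rq->special)->cmnd here
 *		return BLKPREP_OK;
 *	}
 */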
1130 :
1131 : int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
1132 : {
1133 0 : int ret = BLKPREP_OK;
1134 0 :
1135 0 : /*
1136 0 : * If the device is not in running state we will reject some
1137 0 : * or all commands.
1138 0 : */
1139 0 : if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
1140 : switch (sdev->sdev_state) {
1141 0 : case SDEV_OFFLINE:
1142 : /*
1143 : * If the device is offline we refuse to process any
1144 : * commands. The device must be brought online
1145 : * before trying any recovery commands.
1146 : */
1147 0 : sdev_printk(KERN_ERR, sdev,
1148 : "rejecting I/O to offline device\n");
1149 0 : ret = BLKPREP_KILL;
1150 0 : break;
1151 0 : case SDEV_DEL:
1152 : /*
1153 : * If the device is fully deleted, we refuse to
1154 : * process any commands as well.
1155 : */
1156 0 : sdev_printk(KERN_ERR, sdev,
1157 : "rejecting I/O to dead device\n");
1158 0 : ret = BLKPREP_KILL;
1159 0 : break;
1160 0 : case SDEV_QUIESCE:
1161 0 : case SDEV_BLOCK:
1162 0 : case SDEV_CREATED_BLOCK:
1163 : /*
1164 : * If the devices is blocked we defer normal commands.
1165 : */
1166 0 : if (!(req->cmd_flags & REQ_PREEMPT))
1167 0 : ret = BLKPREP_DEFER;
1168 0 : break;
1169 0 : default:
1170 0 : /*
1171 : * For any other not fully online state we only allow
1172 : * special commands. In particular any user initiated
1173 : * command is not allowed.
1174 : */
1175 0 : if (!(req->cmd_flags & REQ_PREEMPT))
1176 0 : ret = BLKPREP_KILL;
1177 0 : break;
1178 : }
1179 : }
1180 0 : return ret;
1181 : }
1182 : EXPORT_SYMBOL(scsi_prep_state_check);
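/*
 * In short, scsi_prep_state_check() maps device states to prep results:
 * SDEV_OFFLINE and SDEV_DEL kill the request, SDEV_QUIESCE, SDEV_BLOCK
 * and SDEV_CREATED_BLOCK defer non-PREEMPT requests, any other
 * non-running state kills non-PREEMPT requests, and SDEV_RUNNING passes
 * everything through as BLKPREP_OK.
 */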
1183 :
1184 : int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1185 : {
1186 0 : struct scsi_device *sdev = q->queuedata;
1187 0 :
1188 : switch (ret) {
1189 0 : case BLKPREP_KILL:
1190 0 : req->errors = DID_NO_CONNECT << 16;
1191 : /* release the command and kill it */
1192 0 : if (req->special) {
1193 0 : struct scsi_cmnd *cmd = req->special;
1194 0 : scsi_release_buffers(cmd);
1195 0 : scsi_put_command(cmd);
1196 0 : req->special = NULL;
1197 : }
1198 0 : break;
1199 0 : case BLKPREP_DEFER:
1200 : /*
1201 : * If we defer, blk_peek_request() returns NULL, but the
1202 : * queue must be restarted, so we plug here if no returning
1203 : * command will automatically do that.
1204 : */
1205 0 : if (sdev->device_busy == 0)
1206 0 : blk_plug_device(q);
1207 0 : break;
1208 0 : default:
1209 0 : req->cmd_flags |= REQ_DONTPREP;
1210 0 : }
1211 :
1212 0 : return ret;
1213 : }
1214 : EXPORT_SYMBOL(scsi_prep_return);
1215 :
1216 : int scsi_prep_fn(struct request_queue *q, struct request *req)
1217 : {
1218 0 : struct scsi_device *sdev = q->queuedata;
1219 0 : int ret = BLKPREP_KILL;
1220 0 :
1221 0 : if (req->cmd_type == REQ_TYPE_BLOCK_PC)
1222 0 : ret = scsi_setup_blk_pc_cmnd(sdev, req);
1223 0 : return scsi_prep_return(q, req, ret);
1224 : }
1225 : EXPORT_SYMBOL(scsi_prep_fn);
1226 :
1227 : /*
1228 : * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
1229 : * return 0.
1230 : *
1231 : * Called with the queue_lock held.
1232 : */
1233 : static inline int scsi_dev_queue_ready(struct request_queue *q,
1234 : struct scsi_device *sdev)
1235 0 : {
1236 0 : if (sdev->device_busy == 0 && sdev->device_blocked) {
1237 : /*
1238 : * unblock after device_blocked iterates to zero
1239 : */
1240 0 : if (--sdev->device_blocked == 0) {
1241 : SCSI_LOG_MLQUEUE(3,
1242 : sdev_printk(KERN_INFO, sdev,
1243 : "unblocking device at zero depth\n"));
1244 : } else {
1245 0 : blk_plug_device(q);
1246 0 : return 0;
1247 : }
1248 : }
1249 0 : if (scsi_device_is_busy(sdev))
1250 0 : return 0;
1251 :
1252 0 : return 1;
1253 : }
1254 :
1255 :
1256 : /*
1257 : * scsi_target_queue_ready: checks if we can send commands to the target
1258 : * @sdev: scsi device on starget to check.
1259 : *
1260 : * Called with the host lock held.
1261 : */
1262 : static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
1263 : struct scsi_device *sdev)
1264 0 : {
1265 0 : struct scsi_target *starget = scsi_target(sdev);
1266 0 :
1267 0 : if (starget->single_lun) {
1268 0 : if (starget->starget_sdev_user &&
1269 : starget->starget_sdev_user != sdev)
1270 0 : return 0;
1271 0 : starget->starget_sdev_user = sdev;
1272 : }
1273 :
1274 0 : if (starget->target_busy == 0 && starget->target_blocked) {
1275 : /*
1276 : * unblock after target_blocked iterates to zero
1277 : */
1278 0 : if (--starget->target_blocked == 0) {
1279 : SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
1280 : "unblocking target at zero depth\n"));
1281 : } else
1282 0 : return 0;
1283 : }
1284 :
1285 0 : if (scsi_target_is_busy(starget)) {
1286 0 : if (list_empty(&sdev->starved_entry)) {
1287 0 : list_add_tail(&sdev->starved_entry,
1288 : &shost->starved_list);
1289 0 : return 0;
1290 : }
1291 : }
1292 :
1293 : /* We're OK to process the command, so we can't be starved */
1294 0 : if (!list_empty(&sdev->starved_entry))
1295 0 : list_del_init(&sdev->starved_entry);
1296 0 : return 1;
1297 : }
1298 :
1299 : /*
1300 : * scsi_host_queue_ready: if we can send requests to shost, return 1 else
1301 : * return 0. We must end up running the queue again whenever 0 is
1302 : * returned, else IO can hang.
1303 : *
1304 : * Called with host_lock held.
1305 : */
1306 : static inline int scsi_host_queue_ready(struct request_queue *q,
1307 : struct Scsi_Host *shost,
1308 : struct scsi_device *sdev)
1309 0 : {
1310 0 : if (scsi_host_in_recovery(shost))
1311 0 : return 0;
1312 0 : if (shost->host_busy == 0 && shost->host_blocked) {
1313 : /*
1314 : * unblock after host_blocked iterates to zero
1315 : */
1316 0 : if (--shost->host_blocked == 0) {
1317 : SCSI_LOG_MLQUEUE(3,
1318 : printk("scsi%d unblocking host at zero depth\n",
1319 : shost->host_no));
1320 : } else {
1321 0 : return 0;
1322 : }
1323 : }
1324 0 : if (scsi_host_is_busy(shost)) {
1325 0 : if (list_empty(&sdev->starved_entry))
1326 0 : list_add_tail(&sdev->starved_entry, &shost->starved_list);
1327 0 : return 0;
1328 : }
1329 :
1330 : /* We're OK to process the command, so we can't be starved */
1331 0 : if (!list_empty(&sdev->starved_entry))
1332 0 : list_del_init(&sdev->starved_entry);
1333 :
1334 0 : return 1;
1335 : }
1336 :
1337 : /*
1338 : * Busy state exporting function for request stacking drivers.
1339 : *
1340 : * For efficiency, no lock is taken to check the busy state of
1341 : * shost/starget/sdev, since the returned value is not guaranteed and
1342 : * may be changed after request stacking drivers call the function,
1343 : * regardless of taking lock or not.
1344 : *
1345 : * When scsi can't dispatch I/Os anymore and needs to kill I/Os
1346 : * (e.g. !sdev), scsi needs to return 'not busy'.
1347 : * Otherwise, request stacking drivers may hold requests forever.
1348 : */
1349 : static int scsi_lld_busy(struct request_queue *q)
1350 : {
1351 0 : struct scsi_device *sdev = q->queuedata;
1352 0 : struct Scsi_Host *shost;
1353 0 : struct scsi_target *starget;
1354 0 :
1355 0 : if (!sdev)
1356 0 : return 0;
1357 0 :
1358 0 : shost = sdev->host;
1359 0 : starget = scsi_target(sdev);
1360 :
1361 0 : if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1362 : scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1363 0 : return 1;
1364 :
1365 0 : return 0;
1366 : }
1367 :
1368 : /*
1369 : * Kill a request for a dead device
1370 : */
1371 : static void scsi_kill_request(struct request *req, struct request_queue *q)
1372 : {
1373 0 : struct scsi_cmnd *cmd = req->special;
1374 0 : struct scsi_device *sdev;
1375 0 : struct scsi_target *starget;
1376 0 : struct Scsi_Host *shost;
1377 0 :
1378 0 : blk_start_request(req);
1379 :
1380 0 : if (unlikely(cmd == NULL)) {
1381 0 : printk(KERN_CRIT "impossible request in %s.\n",
1382 : __func__);
1383 0 : BUG();
1384 : }
1385 :
1386 0 : sdev = cmd->device;
1387 0 : starget = scsi_target(sdev);
1388 0 : shost = sdev->host;
1389 0 : scsi_init_cmd_errh(cmd);
1390 0 : cmd->result = DID_NO_CONNECT << 16;
1391 0 : atomic_inc(&cmd->device->iorequest_cnt);
1392 :
1393 : /*
1394 : * SCSI request completion path will do scsi_device_unbusy(),
1395 : * bump busy counts. To bump the counters, we need to dance
1396 : * with the locks as normal issue path does.
1397 : */
1398 0 : sdev->device_busy++;
1399 0 : spin_unlock(sdev->request_queue->queue_lock);
1400 0 : spin_lock(shost->host_lock);
1401 0 : shost->host_busy++;
1402 0 : starget->target_busy++;
1403 0 : spin_unlock(shost->host_lock);
1404 0 : spin_lock(sdev->request_queue->queue_lock);
1405 :
1406 0 : blk_complete_request(req);
1407 0 : }
1408 :
1409 : static void scsi_softirq_done(struct request *rq)
1410 : {
1411 0 : struct scsi_cmnd *cmd = rq->special;
1412 0 : unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
1413 0 : int disposition;
1414 0 :
1415 0 : INIT_LIST_HEAD(&cmd->eh_entry);
1416 0 :
1417 : /*
1418 : * Set the serial numbers back to zero
1419 : */
1420 0 : cmd->serial_number = 0;
1421 :
1422 0 : atomic_inc(&cmd->device->iodone_cnt);
1423 0 : if (cmd->result)
1424 0 : atomic_inc(&cmd->device->ioerr_cnt);
1425 :
1426 0 : disposition = scsi_decide_disposition(cmd);
1427 : if (disposition != SUCCESS &&
1428 0 : time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
1429 0 : sdev_printk(KERN_ERR, cmd->device,
1430 : "timing out command, waited %lus\n",
1431 : wait_for/HZ);
1432 0 : disposition = SUCCESS;
1433 : }
1434 :
1435 0 : scsi_log_completion(cmd, disposition);
1436 :
1437 : switch (disposition) {
1438 0 : case SUCCESS:
1439 0 : scsi_finish_command(cmd);
1440 0 : break;
1441 0 : case NEEDS_RETRY:
1442 0 : scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
1443 0 : break;
1444 0 : case ADD_TO_MLQUEUE:
1445 0 : scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
1446 0 : break;
1447 0 : default:
1448 0 : if (!scsi_eh_scmd_add(cmd, 0))
1449 0 : scsi_finish_command(cmd);
1450 : }
1451 : }
1452 0 :
1453 : /*
1454 0 : * Function: scsi_request_fn()
1455 : *
1456 0 : * Purpose: Main strategy routine for SCSI.
1457 : *
1458 : * Arguments: q - Pointer to actual queue.
1459 : *
1460 : * Returns: Nothing
1461 : *
1462 : * Lock status: IO request lock assumed to be held when called.
1463 : */
1464 : static void scsi_request_fn(struct request_queue *q)
1465 : {
1466 0 : struct scsi_device *sdev = q->queuedata;
1467 0 : struct Scsi_Host *shost;
1468 0 : struct scsi_cmnd *cmd;
1469 0 : struct request *req;
1470 0 :
1471 0 : if (!sdev) {
1472 0 : printk("scsi: killing requests for dead queue\n");
1473 0 : while ((req = blk_peek_request(q)) != NULL)
1474 0 : scsi_kill_request(req, q);
1475 0 : return;
1476 0 : }
1477 0 :
1478 0 : if(!get_device(&sdev->sdev_gendev))
1479 0 : /* We must be tearing the block queue down already */
1480 0 : return;
1481 0 :
1482 0 : /*
1483 0 : * To start with, we keep looping until the queue is empty, or until
1484 0 : * the host is no longer able to accept any more requests.
1485 0 : */
1486 0 : shost = sdev->host;
1487 0 : while (!blk_queue_plugged(q)) {
1488 0 : int rtn;
1489 0 : /*
1490 : * get next queueable request. We do this early to make sure
1491 : * that the request is fully prepared even if we cannot
1492 : * accept it.
1493 0 : */
1494 0 : req = blk_peek_request(q);
1495 0 : if (!req || !scsi_dev_queue_ready(q, sdev))
1496 0 : break;
1497 :
1498 0 : if (unlikely(!scsi_device_online(sdev))) {
1499 0 : sdev_printk(KERN_ERR, sdev,
1500 : "rejecting I/O to offline device\n");
1501 0 : scsi_kill_request(req, q);
1502 0 : continue;
1503 : }
1504 :
1505 :
1506 : /*
1507 : * Remove the request from the request list.
1508 : */
1509 0 : if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
1510 0 : blk_start_request(req);
1511 0 : sdev->device_busy++;
1512 :
1513 0 : spin_unlock(q->queue_lock);
1514 0 : cmd = req->special;
1515 0 : if (unlikely(cmd == NULL)) {
1516 0 : printk(KERN_CRIT "impossible request in %s.\n"
1517 : "please mail a stack trace to "
1518 : "linux-scsi@vger.kernel.org\n",
1519 : __func__);
1520 0 : blk_dump_rq_flags(req, "foo");
1521 0 : BUG();
1522 : }
1523 0 : spin_lock(shost->host_lock);
1524 :
1525 : /*
1526 : * We hit this when the driver is using a host wide
1527 : * tag map. For device level tag maps the queue_depth check
1528 : * in the device ready fn would prevent us from trying
1529 : * to allocate a tag. Since the map is a shared host resource
1530 : * we add the dev to the starved list so it eventually gets
1531 : * a run when a tag is freed.
1532 : */
1533 0 : if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
1534 0 : if (list_empty(&sdev->starved_entry))
1535 0 : list_add_tail(&sdev->starved_entry,
1536 : &shost->starved_list);
1537 0 : goto not_ready;
1538 : }
1539 :
1540 0 : if (!scsi_target_queue_ready(shost, sdev))
1541 0 : goto not_ready;
1542 :
1543 0 : if (!scsi_host_queue_ready(q, shost, sdev))
1544 0 : goto not_ready;
1545 :
1546 0 : scsi_target(sdev)->target_busy++;
1547 0 : shost->host_busy++;
1548 :
1549 : /*
1550 : * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
1551 : * take the lock again.
1552 : */
1553 0 : spin_unlock_irq(shost->host_lock);
1554 :
1555 : /*
1556 : * Finally, initialize any error handling parameters, and set up
1557 : * the timers for timeouts.
1558 : */
1559 0 : scsi_init_cmd_errh(cmd);
1560 :
1561 : /*
1562 : * Dispatch the command to the low-level driver.
1563 : */
1564 0 : rtn = scsi_dispatch_cmd(cmd);
1565 0 : spin_lock_irq(q->queue_lock);
1566 0 : if(rtn) {
1567 : /* we're refusing the command; because of
1568 : * the way locks get dropped, we need to
1569 : * check here if plugging is required */
1570 0 : if(sdev->device_busy == 0)
1571 0 : blk_plug_device(q);
1572 :
1573 0 : break;
1574 : }
1575 : }
1576 :
1577 0 : goto out;
1578 0 :
1579 : not_ready:
1580 0 : spin_unlock_irq(shost->host_lock);
1581 :
1582 : /*
1583 : * lock q, handle tag, requeue req, and decrement device_busy. We
1584 : * must return with queue_lock held.
1585 : *
1586 : * Decrementing device_busy without checking it is OK, as all such
1587 : * cases (host limits or settings) should run the queue at some
1588 : * later time.
1589 : */
1590 0 : spin_lock_irq(q->queue_lock);
1591 0 : blk_requeue_request(q, req);
1592 0 : sdev->device_busy--;
1593 0 : if(sdev->device_busy == 0)
1594 0 : blk_plug_device(q);
1595 : out:
1596 : /* must be careful here...if we trigger the ->remove() function
1597 : * we cannot be holding the q lock */
1598 0 : spin_unlock_irq(q->queue_lock);
1599 0 : put_device(&sdev->sdev_gendev);
1600 0 : spin_lock_irq(q->queue_lock);
1601 0 : }
1602 :
1603 : u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
1604 : {
1605 40 : struct device *host_dev;
1606 80 : u64 bounce_limit = 0xffffffff;
1607 :
1608 80 : if (shost->unchecked_isa_dma)
1609 40 : return BLK_BOUNCE_ISA;
1610 : /*
1611 : * Platforms with virtual-DMA translation
1612 : * hardware have no practical limit.
1613 : */
1614 80 : if (!PCI_DMA_BUS_IS_PHYS)
1615 40 : return BLK_BOUNCE_ANY;
1616 :
1617 80 : host_dev = scsi_get_device(shost);
1618 200 : if (host_dev && host_dev->dma_mask)
1619 40 : bounce_limit = *host_dev->dma_mask;
1620 :
1621 40 : return bounce_limit;
1622 : }
1623 : EXPORT_SYMBOL(scsi_calculate_bounce_limit);
1624 :
1625 : struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
1626 : request_fn_proc *request_fn)
1627 40 : {
1628 40 : struct request_queue *q;
1629 80 : struct device *dev = shost->shost_gendev.parent;
1630 40 :
1631 40 : q = blk_init_queue(request_fn, NULL);
1632 80 : if (!q)
1633 40 : return NULL;
1634 :
1635 : /*
1636 : * this limit is imposed by hardware restrictions
1637 : */
1638 80 : blk_queue_max_hw_segments(q, shost->sg_tablesize);
1639 40 : blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
1640 :
1641 80 : blk_queue_max_sectors(q, shost->max_sectors);
1642 160 : blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
1643 40 : blk_queue_segment_boundary(q, shost->dma_boundary);
1644 80 : dma_set_seg_boundary(dev, shost->dma_boundary);
1645 :
1646 120 : blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
1647 :
1648 : /* New queue, no concurrency on queue_flags */
1649 80 : if (!shost->use_clustering)
1650 80 : queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
1651 :
1652 : /*
1653 : * set a reasonable default alignment on word boundaries: the
1654 : * host and device may alter it using
1655 : * blk_queue_update_dma_alignment() later.
1656 : */
1657 80 : blk_queue_dma_alignment(q, 0x03);
1658 :
1659 80 : return q;
1660 : }
1661 : EXPORT_SYMBOL(__scsi_alloc_queue);
1662 :
1663 : struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
1664 : {
1665 40 : struct request_queue *q;
1666 :
1667 160 : q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
1668 80 : if (!q)
1669 40 : return NULL;
1670 :
1671 40 : blk_queue_prep_rq(q, scsi_prep_fn);
1672 40 : blk_queue_softirq_done(q, scsi_softirq_done);
1673 40 : blk_queue_rq_timed_out(q, scsi_times_out);
1674 40 : blk_queue_lld_busy(q, scsi_lld_busy);
1675 40 : return q;
1676 : }
1677 :
1678 : void scsi_free_queue(struct request_queue *q)
1679 : {
1680 227 : blk_cleanup_queue(q);
1681 227 : }
1682 :
1683 : /*
1684 : * Function: scsi_block_requests()
1685 : *
1686 : * Purpose: Utility function used by low-level drivers to prevent further
1687 : * commands from being queued to the device.
1688 : *
1689 : * Arguments: shost - Host in question
1690 : *
1691 : * Returns: Nothing
1692 : *
1693 : * Lock status: No locks are assumed held.
1694 : *
1695 : * Notes: There is no timer nor any other means by which the requests
1696 : * get unblocked other than the low-level driver calling
1697 : * scsi_unblock_requests().
1698 : */
1699 : void scsi_block_requests(struct Scsi_Host *shost)
1700 : {
1701 0 : shost->host_self_blocked = 1;
1702 0 : }
1703 : EXPORT_SYMBOL(scsi_block_requests);
1704 :
1705 : /*
1706 : * Function: scsi_unblock_requests()
1707 : *
1708 : * Purpose: Utility function used by low-level drivers to allow further
1709 : * commands to be queued to the device.
1710 : *
1711 : * Arguments: shost - Host in question
1712 : *
1713 : * Returns: Nothing
1714 : *
1715 : * Lock status: No locks are assumed held.
1716 : *
1717 : * Notes: There is no timer nor any other means by which the requests
1718 : * get unblocked other than the low-level driver calling
1719 : * scsi_unblock_requests().
1720 : *
1721 : * This is done as an API function so that changes to the
1722 : * internals of the scsi mid-layer won't require wholesale
1723 : * changes to drivers that use this feature.
1724 : */
1725 : void scsi_unblock_requests(struct Scsi_Host *shost)
1726 : {
1727 0 : shost->host_self_blocked = 0;
1728 0 : scsi_run_host_queues(shost);
1729 0 : }
1730 : EXPORT_SYMBOL(scsi_unblock_requests);
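/*
 * Typical (illustrative) LLD usage of the pair above: fence off the
 * host while the driver performs an operation that cannot tolerate new
 * commands, e.g. a firmware download or internal reset:
 *
 *	scsi_block_requests(shost);
 *	my_hw_download_firmware(hba);	// hypothetical driver helper
 *	scsi_unblock_requests(shost);
 *
 * scsi_unblock_requests() also reruns all of the host's queues, so any
 * requests that arrived while blocked are dispatched afterwards.
 */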
1731 :
1732 : int __init scsi_init_queue(void)
1733 : {
1734 1 : int i;
1735 1 :
1736 2 : scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
1737 1 : sizeof(struct scsi_data_buffer),
1738 : 0, 0, NULL);
1739 2 : if (!scsi_sdb_cache) {
1740 1 : printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
1741 1 : return -ENOMEM;
1742 : }
1743 :
1744 5 : for (i = 0; i < SG_MEMPOOL_NR; i++) {
1745 3 : struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1746 3 : int size = sgp->size * sizeof(struct scatterlist);
1747 :
1748 2 : sgp->slab = kmem_cache_create(sgp->name, size, 0,
1749 : SLAB_HWCACHE_ALIGN, NULL);
1750 3 : if (!sgp->slab) {
1751 1 : printk(KERN_ERR "SCSI: can't init sg slab %s\n",
1752 : sgp->name);
1753 1 : goto cleanup_sdb;
1754 : }
1755 :
1756 2 : sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
1757 : sgp->slab);
1758 3 : if (!sgp->pool) {
1759 1 : printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
1760 : sgp->name);
1761 1 : goto cleanup_sdb;
1762 : }
1763 : }
1764 :
1765 1 : return 0;
1766 2 :
1767 : cleanup_sdb:
1768 7 : for (i = 0; i < SG_MEMPOOL_NR; i++) {
1769 3 : struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1770 4 : if (sgp->pool)
1771 1 : mempool_destroy(sgp->pool);
1772 3 : if (sgp->slab)
1773 1 : kmem_cache_destroy(sgp->slab);
1774 : }
1775 1 : kmem_cache_destroy(scsi_sdb_cache);
1776 :
1777 1 : return -ENOMEM;
1778 : }
1779 :
1780 : void scsi_exit_queue(void)
1781 : {
1782 4 : int i;
1783 4 :
1784 4 : kmem_cache_destroy(scsi_sdb_cache);
1785 :
1786 20 : for (i = 0; i < SG_MEMPOOL_NR; i++) {
1787 8 : struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
1788 12 : mempool_destroy(sgp->pool);
1789 4 : kmem_cache_destroy(sgp->slab);
1790 : }
1791 : }
1792 :
1793 4 : /**
1794 : * scsi_mode_select - issue a mode select
1795 : * @sdev: SCSI device to be queried
1796 : * @pf: Page format bit (1 == standard, 0 == vendor specific)
1797 : * @sp: Save page bit (0 == don't save, 1 == save)
1798 : * @modepage: mode page being requested
1799 : * @buffer: request buffer (may not be smaller than eight bytes)
1800 : * @len: length of request buffer.
1801 : * @timeout: command timeout
1802 : * @retries: number of retries before failing
1803 : * @data: returns a structure abstracting the mode header data
1804 : * @sshdr: place to put sense data (or NULL if no sense to be collected).
1805 : * must be SCSI_SENSE_BUFFERSIZE big.
1806 : *
1807 : * Returns zero if successful; negative error number or scsi
1808 : * status on error
1809 : *
1810 : */
1811 : int
1812 : scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
1813 : unsigned char *buffer, int len, int timeout, int retries,
1814 : struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1815 : {
1816 0 : unsigned char cmd[10];
1817 0 : unsigned char *real_buffer;
1818 0 : int ret;
1819 0 :
1820 0 : memset(cmd, 0, sizeof(cmd));
1821 0 : cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);
1822 :
1823 0 : if (sdev->use_10_for_ms) {
1824 0 : if (len > 65535)
1825 0 : return -EINVAL;
1826 0 : real_buffer = kmalloc(8 + len, GFP_KERNEL);
1827 0 : if (!real_buffer)
1828 0 : return -ENOMEM;
1829 0 : memcpy(real_buffer + 8, buffer, len);
1830 0 : len += 8;
1831 0 : real_buffer[0] = 0;
1832 0 : real_buffer[1] = 0;
1833 0 : real_buffer[2] = data->medium_type;
1834 0 : real_buffer[3] = data->device_specific;
1835 0 : real_buffer[4] = data->longlba ? 0x01 : 0;
1836 0 : real_buffer[5] = 0;
1837 0 : real_buffer[6] = data->block_descriptor_length >> 8;
1838 0 : real_buffer[7] = data->block_descriptor_length;
1839 :
1840 0 : cmd[0] = MODE_SELECT_10;
1841 0 : cmd[7] = len >> 8;
1842 0 : cmd[8] = len;
1843 : } else {
1844 0 : if (len > 255 || data->block_descriptor_length > 255 ||
1845 : data->longlba)
1846 0 : return -EINVAL;
1847 :
1848 0 : real_buffer = kmalloc(4 + len, GFP_KERNEL);
1849 0 : if (!real_buffer)
1850 0 : return -ENOMEM;
1851 0 : memcpy(real_buffer + 4, buffer, len);
1852 0 : len += 4;
1853 0 : real_buffer[0] = 0;
1854 0 : real_buffer[1] = data->medium_type;
1855 0 : real_buffer[2] = data->device_specific;
1856 0 : real_buffer[3] = data->block_descriptor_length;
1857 :
1858 :
1859 0 : cmd[0] = MODE_SELECT;
1860 0 : cmd[4] = len;
1861 : }
1862 :
1863 0 : ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
1864 : sshdr, timeout, retries, NULL);
1865 0 : kfree(real_buffer);
1866 0 : return ret;
1867 : }
1868 : EXPORT_SYMBOL_GPL(scsi_mode_select);
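/*
 * Editorial sketch, not part of scsi_lib.c: a typical caller does a
 * read-modify-write of a mode page, reusing the scsi_mode_data filled in
 * by an earlier MODE SENSE.  The page code, timeout and retry count are
 * illustrative assumptions.
 */
#include <linux/jiffies.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static int example_write_mode_page(struct scsi_device *sdev, int page,
				   unsigned char *buf, int len,
				   struct scsi_mode_data *data)
{
	struct scsi_sense_hdr sshdr;

	/*
	 * @buf holds the block descriptors (if any) plus the page itself;
	 * scsi_mode_select() prepends the mode parameter header from @data.
	 * pf=1 selects the standard page format, sp=0 avoids saving the
	 * page to nonvolatile storage.
	 */
	return scsi_mode_select(sdev, 1, 0, page, buf, len,
				30 * HZ, 3, data, &sshdr);
}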
1869 :
1870 : /**
1871 : * scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
1872 : * @sdev: SCSI device to be queried
1873 : * @dbd: set if mode sense will allow block descriptors to be returned
1874 : * @modepage: mode page being requested
1875 : * @buffer: request buffer (may not be smaller than eight bytes)
1876 : * @len: length of request buffer.
1877 : * @timeout: command timeout
1878 : * @retries: number of retries before failing
1879 : * @data: returns a structure abstracting the mode header data
1880 : * @sshdr: place to put sense data (or NULL if no sense to be collected).
1881 : * must be SCSI_SENSE_BUFFERSIZE big.
1882 : *
1883 : * Returns zero if successful, or the SCSI command result on failure.
1884 : * On success, @data->header_length holds the header offset (either 4
1885 : * or 8, depending on whether a six or ten byte command was issued).
1886 : */
1887 : int
1888 : scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
1889 : unsigned char *buffer, int len, int timeout, int retries,
1890 : struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
1891 : {
1892 0 : unsigned char cmd[12];
1893 0 : int use_10_for_ms;
1894 0 : int header_length;
1895 0 : int result;
1896 0 : struct scsi_sense_hdr my_sshdr;
1897 0 :
1898 0 : memset(data, 0, sizeof(*data));
1899 0 : memset(&cmd[0], 0, 12);
1900 0 : cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
1901 0 : cmd[2] = modepage;
1902 0 :
1903 0 : /* caller might not be interested in sense, but we need it */
1904 0 : if (!sshdr)
1905 0 : sshdr = &my_sshdr;
1906 :
1907 : retry:
1908 0 : use_10_for_ms = sdev->use_10_for_ms;
1909 0 :
1910 0 : if (use_10_for_ms) {
1911 0 : if (len < 8)
1912 0 : len = 8;
1913 :
1914 0 : cmd[0] = MODE_SENSE_10;
1915 0 : cmd[8] = len;
1916 0 : header_length = 8;
1917 : } else {
1918 0 : if (len < 4)
1919 0 : len = 4;
1920 :
1921 0 : cmd[0] = MODE_SENSE;
1922 0 : cmd[4] = len;
1923 0 : header_length = 4;
1924 : }
1925 :
1926 0 : memset(buffer, 0, len);
1927 :
1928 0 : result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
1929 : sshdr, timeout, retries, NULL);
1930 :
1931 : /* This code looks awful: what it's doing is making sure an
1932 : * ILLEGAL REQUEST sense return identifies the actual command
1933 : * byte as the problem. MODE_SENSE commands can return
1934 : * ILLEGAL REQUEST if the code page isn't supported */
1935 :
1936 0 : if (use_10_for_ms && !scsi_status_is_good(result) &&
1937 : (driver_byte(result) & DRIVER_SENSE)) {
1938 0 : if (scsi_sense_valid(sshdr)) {
1939 0 : if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
1940 : (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
1941 : /*
1942 : * Invalid command operation code
1943 : */
1944 0 : sdev->use_10_for_ms = 0;
1945 0 : goto retry;
1946 : }
1947 : }
1948 : }
1949 :
1950 0 : if(scsi_status_is_good(result)) {
1951 0 : if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
1952 : (modepage == 6 || modepage == 8))) {
1953 : /* Initio breakage? */
1954 0 : header_length = 0;
1955 0 : data->length = 13;
1956 0 : data->medium_type = 0;
1957 0 : data->device_specific = 0;
1958 0 : data->longlba = 0;
1959 0 : data->block_descriptor_length = 0;
1960 0 : } else if(use_10_for_ms) {
1961 0 : data->length = buffer[0]*256 + buffer[1] + 2;
1962 0 : data->medium_type = buffer[2];
1963 0 : data->device_specific = buffer[3];
1964 0 : data->longlba = buffer[4] & 0x01;
1965 0 : data->block_descriptor_length = buffer[6]*256
1966 : + buffer[7];
1967 : } else {
1968 0 : data->length = buffer[0] + 1;
1969 0 : data->medium_type = buffer[1];
1970 0 : data->device_specific = buffer[2];
1971 0 : data->block_descriptor_length = buffer[3];
1972 : }
1973 0 : data->header_length = header_length;
1974 : }
1975 :
1976 0 : return result;
1977 : }
1978 : EXPORT_SYMBOL(scsi_mode_sense);
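/*
 * Editorial sketch, not part of scsi_lib.c: on success the page data
 * begins after the mode parameter header and any block descriptors, so a
 * caller locates it with data.header_length + data.block_descriptor_length.
 * Page 0x08 (caching), the timeout and the retry count are illustrative
 * assumptions.
 */
#include <linux/jiffies.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static unsigned char *example_read_caching_page(struct scsi_device *sdev,
						unsigned char *buf, int len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res;

	res = scsi_mode_sense(sdev, 0 /* dbd */, 0x08, buf, len,
			      30 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return NULL;

	/* skip the 4- or 8-byte header plus any block descriptors */
	return buf + data.header_length + data.block_descriptor_length;
}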
1979 :
1980 : /**
1981 : * scsi_test_unit_ready - test if unit is ready
1982 : * @sdev: scsi device to change the state of.
1983 : * @timeout: command timeout
1984 : * @retries: number of retries before failing
1985 : * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
1986 : * returning sense. Make sure that this is cleared before passing
1987 : * in.
1988 : *
1989 : * Returns zero if successful or an error if TUR failed. For
1990 : * removable media, a return of NOT_READY or UNIT_ATTENTION is
1991 : * translated to success, with the ->changed flag updated.
1992 : **/
1993 : int
1994 : scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
1995 : struct scsi_sense_hdr *sshdr_external)
1996 0 : {
1997 0 : char cmd[] = {
1998 0 : TEST_UNIT_READY, 0, 0, 0, 0, 0,
1999 0 : };
2000 0 : struct scsi_sense_hdr *sshdr;
2001 0 : int result;
2002 0 :
2003 0 : if (!sshdr_external)
2004 0 : sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
2005 : else
2006 0 : sshdr = sshdr_external;
2007 :
2008 0 : /* try to eat the UNIT_ATTENTION if there are enough retries */
2009 : do {
2010 0 : result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
2011 : timeout, retries, NULL);
2012 0 : if (sdev->removable && scsi_sense_valid(sshdr) &&
2013 : sshdr->sense_key == UNIT_ATTENTION)
2014 0 : sdev->changed = 1;
2015 : } while (scsi_sense_valid(sshdr) &&
2016 0 : sshdr->sense_key == UNIT_ATTENTION && --retries);
2017 :
2018 0 : if (!sshdr)
2019 : /* could not allocate sense buffer, so can't process it */
2020 0 : return result;
2021 0 :
2022 0 : if (sdev->removable && scsi_sense_valid(sshdr) &&
2023 : (sshdr->sense_key == UNIT_ATTENTION ||
2024 : sshdr->sense_key == NOT_READY)) {
2025 0 : sdev->changed = 1;
2026 0 : result = 0;
2027 : }
2028 0 : if (!sshdr_external)
2029 0 : kfree(sshdr);
2030 0 : return result;
2031 : }
2032 : EXPORT_SYMBOL(scsi_test_unit_ready);
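/*
 * Editorial sketch, not part of scsi_lib.c: an upper-level driver polling
 * a removable device.  scsi_test_unit_ready() already retries through
 * UNIT ATTENTION and maintains sdev->changed; the timeout, retry count
 * and log message are illustrative assumptions.
 */
#include <linux/jiffies.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>

static int example_media_present(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr = { };	/* cleared, as the kernel-doc requires */
	int res = scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr);

	if (sdev->removable && sdev->changed)
		sdev_printk(KERN_INFO, sdev, "example: media change detected\n");

	return scsi_status_is_good(res);
}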
2033 :
2034 : /**
2035 : * scsi_device_set_state - Take the given device through the device state model.
2036 : * @sdev: scsi device to change the state of.
2037 : * @state: state to change to.
2038 : *
2039 : * Returns zero if successful or an error if the requested
2040 : * transition is illegal.
2041 : */
2042 : int
2043 : scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
2044 : {
2045 2084 : enum scsi_device_state oldstate = sdev->sdev_state;
2046 :
2047 2084 : if (state == oldstate)
2048 1042 : return 0;
2049 :
2050 1042 : switch (state) {
2051 4168 : case SDEV_CREATED:
2052 : switch (oldstate) {
2053 3126 : case SDEV_CREATED_BLOCK:
2054 1042 : break;
2055 2084 : default:
2056 2084 : goto illegal;
2057 : }
2058 1042 : break;
2059 1042 :
2060 3126 : case SDEV_RUNNING:
2061 : switch (oldstate) {
2062 3126 : case SDEV_CREATED:
2063 3126 : case SDEV_OFFLINE:
2064 3126 : case SDEV_QUIESCE:
2065 3126 : case SDEV_BLOCK:
2066 1042 : break;
2067 2084 : default:
2068 2084 : goto illegal;
2069 1042 : }
2070 2084 : break;
2071 2084 :
2072 3126 : case SDEV_QUIESCE:
2073 : switch (oldstate) {
2074 3126 : case SDEV_RUNNING:
2075 3126 : case SDEV_OFFLINE:
2076 1042 : break;
2077 2084 : default:
2078 2084 : goto illegal;
2079 1042 : }
2080 1042 : break;
2081 1042 :
2082 3126 : case SDEV_OFFLINE:
2083 : switch (oldstate) {
2084 3126 : case SDEV_CREATED:
2085 3126 : case SDEV_RUNNING:
2086 3126 : case SDEV_QUIESCE:
2087 3126 : case SDEV_BLOCK:
2088 1042 : break;
2089 2084 : default:
2090 2084 : goto illegal;
2091 1042 : }
2092 2084 : break;
2093 2084 :
2094 3126 : case SDEV_BLOCK:
2095 : switch (oldstate) {
2096 3126 : case SDEV_RUNNING:
2097 3126 : case SDEV_CREATED_BLOCK:
2098 1042 : break;
2099 2084 : default:
2100 2084 : goto illegal;
2101 1042 : }
2102 1042 : break;
2103 1042 :
2104 3126 : case SDEV_CREATED_BLOCK:
2105 : switch (oldstate) {
2106 3126 : case SDEV_CREATED:
2107 1042 : break;
2108 2084 : default:
2109 2084 : goto illegal;
2110 : }
2111 1042 : break;
2112 1042 :
2113 3126 : case SDEV_CANCEL:
2114 : switch (oldstate) {
2115 3126 : case SDEV_CREATED:
2116 3126 : case SDEV_RUNNING:
2117 3126 : case SDEV_QUIESCE:
2118 3126 : case SDEV_OFFLINE:
2119 3126 : case SDEV_BLOCK:
2120 1042 : break;
2121 2084 : default:
2122 2084 : goto illegal;
2123 1042 : }
2124 2084 : break;
2125 2084 :
2126 4168 : case SDEV_DEL:
2127 : switch (oldstate) {
2128 3126 : case SDEV_CREATED:
2129 3126 : case SDEV_RUNNING:
2130 3126 : case SDEV_OFFLINE:
2131 3126 : case SDEV_CANCEL:
2132 1042 : break;
2133 2084 : default:
2134 2084 : goto illegal;
2135 1042 : }
2136 2084 : break;
2137 2084 :
2138 : }
2139 2084 : sdev->sdev_state = state;
2140 1042 : return 0;
2141 1042 :
2142 : illegal:
2143 : SCSI_LOG_ERROR_RECOVERY(1,
2144 : sdev_printk(KERN_ERR, sdev,
2145 : "Illegal state transition %s->%s\n",
2146 : scsi_device_state_name(oldstate),
2147 : scsi_device_state_name(state))
2148 : );
2149 1042 : return -EINVAL;
2150 : }
2151 : EXPORT_SYMBOL(scsi_device_set_state);
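/*
 * Editorial sketch, not part of scsi_lib.c: the table above only permits
 * specific transitions, so callers check the return value rather than
 * assume the move happened.  SDEV_OFFLINE is reachable from CREATED,
 * RUNNING, QUIESCE and BLOCK; -EINVAL means the current state (for
 * example CANCEL or DEL) does not allow it.  The warning text is an
 * illustrative assumption.
 */
#include <scsi/scsi_device.h>

static void example_offline_device(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "could not transition to SDEV_OFFLINE\n");
}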
2152 :
2153 : /**
2154 : * scsi_evt_emit - emit a single SCSI device uevent
2155 : * @sdev: associated SCSI device
2156 : * @evt: event to emit
2157 : *
2158 : * Send a single uevent (scsi_event) to the associated scsi_device.
2159 : */
2160 : static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
2161 : {
2162 0 : int idx = 0;
2163 0 : char *envp[3];
2164 0 :
2165 0 : switch (evt->evt_type) {
2166 0 : case SDEV_EVT_MEDIA_CHANGE:
2167 0 : envp[idx++] = "SDEV_MEDIA_CHANGE=1";
2168 0 : break;
2169 0 :
2170 0 : default:
2171 0 : /* do nothing */
2172 0 : break;
2173 : }
2174 :
2175 0 : envp[idx++] = NULL;
2176 :
2177 0 : kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2178 0 : }
2179 :
2180 : /**
2181 : * scsi_evt_thread - send a uevent for each scsi event
2182 : * @work: work struct for scsi_device
2183 : *
2184 : * Dispatch queued events to their associated scsi_device kobjects
2185 : * as uevents.
2186 : */
2187 : void scsi_evt_thread(struct work_struct *work)
2188 : {
2189 0 : struct scsi_device *sdev;
2190 0 : LIST_HEAD(event_list);
2191 0 :
2192 0 : sdev = container_of(work, struct scsi_device, event_work);
2193 0 :
2194 0 : while (1) {
2195 0 : struct scsi_event *evt;
2196 0 : struct list_head *this, *tmp;
2197 0 : unsigned long flags;
2198 0 :
2199 0 : spin_lock_irqsave(&sdev->list_lock, flags);
2200 0 : list_splice_init(&sdev->event_list, &event_list);
2201 0 : spin_unlock_irqrestore(&sdev->list_lock, flags);
2202 :
2203 0 : if (list_empty(&event_list))
2204 0 : break;
2205 :
2206 0 : list_for_each_safe(this, tmp, &event_list) {
2207 0 : evt = list_entry(this, struct scsi_event, node);
2208 0 : list_del(&evt->node);
2209 0 : scsi_evt_emit(sdev, evt);
2210 0 : kfree(evt);
2211 : }
2212 0 : }
2213 0 : }
2214 0 :
2215 : /**
2216 : * sdev_evt_send - send asserted event to uevent thread
2217 : * @sdev: scsi_device event occurred on
2218 : * @evt: event to send
2219 : *
2220 : * Assert scsi device event asynchronously.
2221 : */
2222 : void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
2223 : {
2224 0 : unsigned long flags;
2225 0 :
2226 : #if 0
2227 : /* FIXME: currently this check eliminates all media change events
2228 : * for polled devices. Need to update to discriminate between AN
2229 : * and polled events */
2230 : if (!test_bit(evt->evt_type, sdev->supported_events)) {
2231 : kfree(evt);
2232 : return;
2233 : }
2234 : #endif
2235 :
2236 0 : spin_lock_irqsave(&sdev->list_lock, flags);
2237 0 : list_add_tail(&evt->node, &sdev->event_list);
2238 0 : schedule_work(&sdev->event_work);
2239 0 : spin_unlock_irqrestore(&sdev->list_lock, flags);
2240 0 : }
2241 : EXPORT_SYMBOL_GPL(sdev_evt_send);
2242 :
2243 : /**
2244 : * sdev_evt_alloc - allocate a new scsi event
2245 : * @evt_type: type of event to allocate
2246 : * @gfpflags: GFP flags for allocation
2247 : *
2248 : * Allocates and returns a new scsi_event.
2249 : */
2250 : struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
2251 : gfp_t gfpflags)
2252 0 : {
2253 0 : struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2254 0 : if (!evt)
2255 0 : return NULL;
2256 :
2257 0 : evt->evt_type = evt_type;
2258 0 : INIT_LIST_HEAD(&evt->node);
2259 :
2260 : /* evt_type-specific initialization, if any */
2261 : switch (evt_type) {
2262 0 : case SDEV_EVT_MEDIA_CHANGE:
2263 : default:
2264 : /* do nothing */
2265 : break;
2266 : }
2267 :
2268 0 : return evt;
2269 : }
2270 : EXPORT_SYMBOL_GPL(sdev_evt_alloc);
2271 :
2272 : /**
2273 : * sdev_evt_send_simple - send asserted event to uevent thread
2274 : * @sdev: scsi_device event occurred on
2275 : * @evt_type: type of event to send
2276 : * @gfpflags: GFP flags for allocation
2277 : *
2278 : * Assert scsi device event asynchronously, given an event type.
2279 : */
2280 : void sdev_evt_send_simple(struct scsi_device *sdev,
2281 : enum scsi_device_event evt_type, gfp_t gfpflags)
2282 : {
2283 0 : struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
2284 0 : if (!evt) {
2285 0 : sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
2286 0 : evt_type);
2287 0 : return;
2288 : }
2289 :
2290 0 : sdev_evt_send(sdev, evt);
2291 0 : }
2292 : EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
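/*
 * Editorial sketch, not part of scsi_lib.c: the intended caller is an LLD
 * that has just learned, e.g. via an asynchronous notification, that the
 * medium changed and wants udev to see SDEV_MEDIA_CHANGE=1.  GFP_ATOMIC
 * is used because such notifications often arrive in atomic context.
 */
#include <linux/gfp.h>
#include <scsi/scsi_device.h>

static void example_report_media_change(struct scsi_device *sdev)
{
	/* allocation failure is logged and dropped by sdev_evt_send_simple() */
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}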
2293 :
2294 : /**
2295 : * scsi_device_quiesce - Block user issued commands.
2296 : * @sdev: scsi device to quiesce.
2297 : *
2298 : * This works by trying to transition to the SDEV_QUIESCE state
2299 : * (which must be a legal transition). When the device is in this
2300 : * state, only special requests will be accepted, all others will
2301 : * be deferred. Since special requests may also be requeued requests,
2302 : * a successful return doesn't guarantee the device will be
2303 : * totally quiescent.
2304 : *
2305 : * Must be called with user context, may sleep.
2306 : *
2307 : * Returns zero if successful or an error if not.
2308 : */
2309 : int
2310 : scsi_device_quiesce(struct scsi_device *sdev)
2311 : {
2312 0 : int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
2313 0 : if (err)
2314 0 : return err;
2315 :
2316 0 : scsi_run_queue(sdev->request_queue);
2317 0 : while (sdev->device_busy) {
2318 0 : msleep_interruptible(200);
2319 0 : scsi_run_queue(sdev->request_queue);
2320 : }
2321 0 : return 0;
2322 : }
2323 : EXPORT_SYMBOL(scsi_device_quiesce);
2324 :
2325 : /**
2326 : * scsi_device_resume - Restart user issued commands to a quiesced device.
2327 : * @sdev: scsi device to resume.
2328 : *
2329 : * Moves the device from quiesced back to running and restarts the
2330 : * queues.
2331 : *
2332 : * Must be called with user context, may sleep.
2333 : */
2334 : void
2335 : scsi_device_resume(struct scsi_device *sdev)
2336 : {
2337 15 : if(scsi_device_set_state(sdev, SDEV_RUNNING))
2338 3 : return;
2339 9 : scsi_run_queue(sdev->request_queue);
2340 3 : }
2341 : EXPORT_SYMBOL(scsi_device_resume);
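/*
 * Editorial sketch, not part of scsi_lib.c: quiesce/resume bracket work
 * that must see no user I/O, such as a firmware download or a power
 * transition.  Both may sleep, so this only works from process context.
 * example_do_maintenance() is a hypothetical stub.
 */
#include <scsi/scsi_device.h>

static int example_do_maintenance(struct scsi_device *sdev)
{
	return 0;	/* hypothetical: the sensitive work would go here */
}

static int example_maintenance(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);

	if (err)
		return err;

	err = example_do_maintenance(sdev);

	scsi_device_resume(sdev);
	return err;
}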
2342 :
2343 : static void
2344 : device_quiesce_fn(struct scsi_device *sdev, void *data)
2345 : {
2346 0 : scsi_device_quiesce(sdev);
2347 0 : }
2348 :
2349 : void
2350 : scsi_target_quiesce(struct scsi_target *starget)
2351 : {
2352 0 : starget_for_each_device(starget, NULL, device_quiesce_fn);
2353 0 : }
2354 : EXPORT_SYMBOL(scsi_target_quiesce);
2355 :
2356 : static void
2357 : device_resume_fn(struct scsi_device *sdev, void *data)
2358 : {
2359 0 : scsi_device_resume(sdev);
2360 0 : }
2361 :
2362 : void
2363 : scsi_target_resume(struct scsi_target *starget)
2364 : {
2365 0 : starget_for_each_device(starget, NULL, device_resume_fn);
2366 0 : }
2367 : EXPORT_SYMBOL(scsi_target_resume);
2368 :
2369 : /**
2370 : * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
2371 : * @sdev: device to block
2372 : *
2373 : * Block request made by SCSI LLDs to temporarily stop all
2374 : * scsi commands on the specified device. Called from interrupt
2375 : * or normal process context.
2376 : *
2377 : * Returns zero if successful or error if not
2378 : *
2379 : * Notes:
2380 : * This routine transitions the device to the SDEV_BLOCK state
2381 : * (which must be a legal transition). When the device is in this
2382 : * state, all commands are deferred until the scsi lld reenables
2383 : * the device with scsi_device_unblock or device_block_tmo fires.
2384 : * This routine assumes the host_lock is held on entry.
2385 : */
2386 : int
2387 : scsi_internal_device_block(struct scsi_device *sdev)
2388 : {
2389 0 : struct request_queue *q = sdev->request_queue;
2390 0 : unsigned long flags;
2391 0 : int err = 0;
2392 0 :
2393 0 : err = scsi_device_set_state(sdev, SDEV_BLOCK);
2394 0 : if (err) {
2395 0 : err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
2396 :
2397 0 : if (err)
2398 0 : return err;
2399 : }
2400 :
2401 : /*
2402 : * The device has transitioned to SDEV_BLOCK. Stop the
2403 : * block layer from calling the midlayer with this device's
2404 : * request queue.
2405 : */
2406 0 : spin_lock_irqsave(q->queue_lock, flags);
2407 0 : blk_stop_queue(q);
2408 0 : spin_unlock_irqrestore(q->queue_lock, flags);
2409 :
2410 0 : return 0;
2411 : }
2412 : EXPORT_SYMBOL_GPL(scsi_internal_device_block);
2413 :
2414 : /**
2415 : * scsi_internal_device_unblock - resume a device after a block request
2416 : * @sdev: device to resume
2417 : *
2418 : * Called by SCSI LLDs or the midlayer to restart the device queue
2419 : * for the previously suspended scsi device. Called from interrupt or
2420 : * normal process context.
2421 : *
2422 : * Returns zero if successful or error if not.
2423 : *
2424 : * Notes:
2425 : * This routine transitions the device to the SDEV_RUNNING state
2426 : * (which must be a legal transition) allowing the midlayer to
2427 : * goose the queue for this device. This routine assumes the
2428 : * host_lock is held upon entry.
2429 : */
2430 : int
2431 : scsi_internal_device_unblock(struct scsi_device *sdev)
2432 : {
2433 0 : struct request_queue *q = sdev->request_queue;
2434 0 : unsigned long flags;
2435 0 :
2436 : /*
2437 : * Try to transition the scsi device to SDEV_RUNNING
2438 : * and goose the device queue if successful.
2439 : */
2440 0 : if (sdev->sdev_state == SDEV_BLOCK)
2441 0 : sdev->sdev_state = SDEV_RUNNING;
2442 0 : else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
2443 0 : sdev->sdev_state = SDEV_CREATED;
2444 0 : else if (sdev->sdev_state != SDEV_CANCEL &&
2445 : sdev->sdev_state != SDEV_OFFLINE)
2446 0 : return -EINVAL;
2447 :
2448 0 : spin_lock_irqsave(q->queue_lock, flags);
2449 0 : blk_start_queue(q);
2450 0 : spin_unlock_irqrestore(q->queue_lock, flags);
2451 :
2452 0 : return 0;
2453 : }
2454 : EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
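/*
 * Editorial sketch, not part of scsi_lib.c: both helpers document that
 * host_lock is held on entry, so a transport driver reacting to a
 * temporary link loss would wrap them roughly like this.  The link-event
 * handler itself is hypothetical, and the example assumes the prototypes
 * are visible through <scsi/scsi_device.h>.
 */
#include <linux/spinlock.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void example_link_event(struct scsi_device *sdev, int link_up)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	if (link_up)
		scsi_internal_device_unblock(sdev);	/* restart the queue */
	else
		scsi_internal_device_block(sdev);	/* defer all commands */
	spin_unlock_irqrestore(shost->host_lock, flags);
}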
2455 :
2456 : static void
2457 : device_block(struct scsi_device *sdev, void *data)
2458 : {
2459 0 : scsi_internal_device_block(sdev);
2460 0 : }
2461 :
2462 : static int
2463 : target_block(struct device *dev, void *data)
2464 : {
2465 0 : if (scsi_is_target_device(dev))
2466 0 : starget_for_each_device(to_scsi_target(dev), NULL,
2467 : device_block);
2468 0 : return 0;
2469 : }
2470 :
2471 : void
2472 : scsi_target_block(struct device *dev)
2473 : {
2474 0 : if (scsi_is_target_device(dev))
2475 0 : starget_for_each_device(to_scsi_target(dev), NULL,
2476 : device_block);
2477 : else
2478 0 : device_for_each_child(dev, NULL, target_block);
2479 0 : }
2480 : EXPORT_SYMBOL_GPL(scsi_target_block);
2481 :
2482 : static void
2483 : device_unblock(struct scsi_device *sdev, void *data)
2484 : {
2485 0 : scsi_internal_device_unblock(sdev);
2486 0 : }
2487 :
2488 : static int
2489 : target_unblock(struct device *dev, void *data)
2490 : {
2491 0 : if (scsi_is_target_device(dev))
2492 0 : starget_for_each_device(to_scsi_target(dev), NULL,
2493 : device_unblock);
2494 0 : return 0;
2495 : }
2496 :
2497 : void
2498 : scsi_target_unblock(struct device *dev)
2499 : {
2500 0 : if (scsi_is_target_device(dev))
2501 0 : starget_for_each_device(to_scsi_target(dev), NULL,
2502 : device_unblock);
2503 : else
2504 0 : device_for_each_child(dev, NULL, target_unblock);
2505 0 : }
2506 : EXPORT_SYMBOL_GPL(scsi_target_unblock);
2507 :
2508 : /**
2509 : * scsi_kmap_atomic_sg - find and atomically map an sg-element
2510 : * @sgl: scatter-gather list
2511 : * @sg_count: number of segments in sg
2512 : * @offset: offset in bytes into sg, on return offset into the mapped area
2513 : * @len: bytes to map, on return number of bytes mapped
2514 : *
2515 : * Returns virtual address of the start of the mapped page
2516 : */
2517 : void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2518 : size_t *offset, size_t *len)
2519 : {
2520 0 : int i;
2521 0 : size_t sg_len = 0, len_complete = 0;
2522 0 : struct scatterlist *sg;
2523 0 : struct page *page;
2524 0 :
2525 0 : WARN_ON(!irqs_disabled());
2526 0 :
2527 0 : for_each_sg(sgl, sg, sg_count, i) {
2528 0 : len_complete = sg_len; /* Complete sg-entries */
2529 0 : sg_len += sg->length;
2530 0 : if (sg_len > *offset)
2531 0 : break;
2532 0 : }
2533 0 :
2534 0 : if (unlikely(i == sg_count)) {
2535 0 : printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
2536 0 : "elements %d\n",
2537 0 : __func__, sg_len, *offset, sg_count);
2538 0 : WARN_ON(1);
2539 0 : return NULL;
2540 0 : }
2541 0 :
2542 0 : /* Offset starting from the beginning of first page in this sg-entry */
2543 0 : *offset = *offset - len_complete + sg->offset;
2544 :
2545 : /* Assumption: contiguous pages can be accessed as "page + i" */
2546 0 : page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
2547 0 : *offset &= ~PAGE_MASK;
2548 :
2549 : /* Bytes in this sg-entry from *offset to the end of the page */
2550 0 : sg_len = PAGE_SIZE - *offset;
2551 0 : if (*len > sg_len)
2552 0 : *len = sg_len;
2553 :
2554 0 : return kmap_atomic(page, KM_BIO_SRC_IRQ);
2555 : }
2556 : EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2557 :
2558 : /**
2559 : * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
2560 : * @virt: virtual address to be unmapped
2561 : */
2562 : void scsi_kunmap_atomic_sg(void *virt)
2563 : {
2564 0 : kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2565 0 : }
2566 : EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
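/*
 * Editorial sketch, not part of scsi_lib.c: the map/unmap pair lets a
 * driver touch a byte range of a command's scatter-gather data without
 * knowing which page backs it.  Because it uses kmap_atomic() and warns
 * if interrupts are enabled, the caller must not sleep between the two
 * calls; the IRQ handling and copy size here are illustrative.
 */
#include <linux/irqflags.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>

static void example_peek_data(struct scsi_cmnd *cmd, void *dst, size_t want)
{
	size_t offset = 0, len = want;
	unsigned long flags;
	void *p;

	local_irq_save(flags);
	p = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				&offset, &len);
	if (p) {
		/* on return, offset is relative to the mapped page and
		 * len is how much of the request fits in that page */
		memcpy(dst, p + offset, min(len, want));
		scsi_kunmap_atomic_sg(p);
	}
	local_irq_restore(flags);
}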