LCOV - code coverage report
Current view: top level - lkbce/include/linux - blkdev.h
Test: coverage.info          Lines: 21 hit / 21 total (100.0 %)
Date: 2017-01-25             Functions: 0 hit / 0 total (-)

          Line data    Source code
       1             : #ifndef _LINUX_BLKDEV_H
       2             : #define _LINUX_BLKDEV_H
       3             : 
       4             : #ifdef CONFIG_BLOCK
       5             : 
       6             : #include <linux/sched.h>
       7             : #include <linux/major.h>
       8             : #include <linux/genhd.h>
       9             : #include <linux/list.h>
      10             : #include <linux/timer.h>
      11             : #include <linux/workqueue.h>
      12             : #include <linux/pagemap.h>
      13             : #include <linux/backing-dev.h>
      14             : #include <linux/wait.h>
      15             : #include <linux/mempool.h>
      16             : #include <linux/bio.h>
      17             : #include <linux/module.h>
      18             : #include <linux/stringify.h>
      19             : #include <linux/gfp.h>
      20             : #include <linux/bsg.h>
      21             : #include <linux/smp.h>
      22             : 
      23             : #include <asm/scatterlist.h>
      24             : 
      25             : struct scsi_ioctl_command;
      26             : 
      27             : struct request_queue;
      28             : struct elevator_queue;
      29             : struct request_pm_state;
      30             : struct blk_trace;
      31             : struct request;
      32             : struct sg_io_hdr;
      33             : 
      34             : #define BLKDEV_MIN_RQ   4
      35             : #define BLKDEV_MAX_RQ   128     /* Default maximum */
      36             : 
      37             : struct request;
      38           1 : typedef void (rq_end_io_fn)(struct request *, int);
      39           1 : 
      40             : struct request_list {
      41             :         /*
      42             :          * count[], starved[], and wait[] are indexed by
      43             :          * BLK_RW_SYNC/BLK_RW_ASYNC
      44             :          */
      45             :         int count[2];
      46             :         int starved[2];
      47             :         int elvpriv;
      48             :         mempool_t *rq_pool;
      49             :         wait_queue_head_t wait[2];
      50             : };
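
/*
 * A minimal sketch of how the BLK_RW_SYNC/BLK_RW_ASYNC indexing described
 * above is used. rl_count_for() is a hypothetical helper; the indices come
 * from <linux/backing-dev.h>, which this header already includes.
 */
static inline int rl_count_for(struct request_list *rl, int is_sync)
{
        int idx = is_sync ? BLK_RW_SYNC : BLK_RW_ASYNC;

        return rl->count[idx];          /* requests currently allocated */
}
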
      51           1 : 
      52             : /*
      53             :  * request command types
      54             :  */
      55             : enum rq_cmd_type_bits {
      56             :         REQ_TYPE_FS             = 1,    /* fs request */
      57             :         REQ_TYPE_BLOCK_PC,              /* scsi command */
      58             :         REQ_TYPE_SENSE,                 /* sense request */
      59             :         REQ_TYPE_PM_SUSPEND,            /* suspend request */
      60             :         REQ_TYPE_PM_RESUME,             /* resume request */
      61             :         REQ_TYPE_PM_SHUTDOWN,           /* shutdown request */
      62             :         REQ_TYPE_SPECIAL,               /* driver defined type */
      63             :         REQ_TYPE_LINUX_BLOCK,           /* generic block layer message */
      64             :         /*
      65             :          * for ATA/ATAPI devices. this really doesn't belong here, ide should
      66             :          * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
      67             :          * private REQ_LB opcodes to differentiate what type of request this is
      68             :          */
      69             :         REQ_TYPE_ATA_TASKFILE,
      70             :         REQ_TYPE_ATA_PC,
      71             : };
      72             : 
      73             : /*
       74             :  * For requests of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
       75             :  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
       76             :  * SCSI cdb).
      77             :  *
      78             :  * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
      79             :  * typically to differentiate REQ_TYPE_SPECIAL requests.
      80             :  *
      81             :  */
      82             : enum {
      83             :         REQ_LB_OP_EJECT = 0x40,         /* eject request */
      84             :         REQ_LB_OP_FLUSH = 0x41,         /* flush request */
      85             : };
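
/*
 * Sketch of the driver-private opcode range described above, as it might
 * look in a driver source file. MYDRV_OP_RESET and mydrv_is_reset() are
 * hypothetical, not part of this header.
 */
#define MYDRV_OP_RESET  0x01            /* driver private: below 0x40 */

static inline int mydrv_is_reset(struct request *rq)
{
        return rq->cmd_type == REQ_TYPE_SPECIAL &&
               rq->cmd[0] == MYDRV_OP_RESET;
}
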
      86             : 
      87             : /*
       88             :  * request type modifier bits. The first four bits must match the BIO_RW* bits.
      89             :  */
      90             : enum rq_flag_bits {
      91             :         __REQ_RW,               /* not set, read. set, write */
      92             :         __REQ_FAILFAST_DEV,     /* no driver retries of device errors */
      93             :         __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
      94             :         __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
      95             :         /* above flags must match BIO_RW_* */
      96             :         __REQ_DISCARD,          /* request to discard sectors */
      97             :         __REQ_SORTED,           /* elevator knows about this request */
      98             :         __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
      99           2 :         __REQ_HARDBARRIER,      /* may not be passed by drive either */
     100             :         __REQ_FUA,              /* forced unit access */
     101             :         __REQ_NOMERGE,          /* don't touch this for merging */
     102             :         __REQ_STARTED,          /* drive already may have started this one */
     103             :         __REQ_DONTPREP,         /* don't call prep for this one */
     104             :         __REQ_QUEUED,           /* uses queueing */
     105             :         __REQ_ELVPRIV,          /* elevator private data attached */
     106             :         __REQ_FAILED,           /* set if the request failed */
     107             :         __REQ_QUIET,            /* don't worry about errors */
     108             :         __REQ_PREEMPT,          /* set for "ide_preempt" requests */
     109             :         __REQ_ORDERED_COLOR,    /* is before or after barrier */
     110             :         __REQ_RW_SYNC,          /* request is sync (sync write or read) */
     111             :         __REQ_ALLOCED,          /* request came from our alloc pool */
     112             :         __REQ_RW_META,          /* metadata io request */
     113             :         __REQ_COPY_USER,        /* contains copies of user pages */
     114             :         __REQ_INTEGRITY,        /* integrity metadata has been remapped */
     115             :         __REQ_NOIDLE,           /* Don't anticipate more IO after this one */
     116             :         __REQ_IO_STAT,          /* account I/O stat */
     117             :         __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
     118             :         __REQ_NR_BITS,          /* stops here */
     119             : };
     120             : 
     121             : #define REQ_RW          (1 << __REQ_RW)
     122             : #define REQ_FAILFAST_DEV        (1 << __REQ_FAILFAST_DEV)
     123             : #define REQ_FAILFAST_TRANSPORT  (1 << __REQ_FAILFAST_TRANSPORT)
     124             : #define REQ_FAILFAST_DRIVER     (1 << __REQ_FAILFAST_DRIVER)
     125             : #define REQ_DISCARD     (1 << __REQ_DISCARD)
     126             : #define REQ_SORTED      (1 << __REQ_SORTED)
     127             : #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
     128             : #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
     129             : #define REQ_FUA         (1 << __REQ_FUA)
     130             : #define REQ_NOMERGE     (1 << __REQ_NOMERGE)
     131             : #define REQ_STARTED     (1 << __REQ_STARTED)
     132             : #define REQ_DONTPREP    (1 << __REQ_DONTPREP)
     133             : #define REQ_QUEUED      (1 << __REQ_QUEUED)
     134             : #define REQ_ELVPRIV     (1 << __REQ_ELVPRIV)
     135             : #define REQ_FAILED      (1 << __REQ_FAILED)
     136             : #define REQ_QUIET       (1 << __REQ_QUIET)
     137             : #define REQ_PREEMPT     (1 << __REQ_PREEMPT)
     138             : #define REQ_ORDERED_COLOR       (1 << __REQ_ORDERED_COLOR)
     139             : #define REQ_RW_SYNC     (1 << __REQ_RW_SYNC)
     140             : #define REQ_ALLOCED     (1 << __REQ_ALLOCED)
     141             : #define REQ_RW_META     (1 << __REQ_RW_META)
     142             : #define REQ_COPY_USER   (1 << __REQ_COPY_USER)
     143             : #define REQ_INTEGRITY   (1 << __REQ_INTEGRITY)
     144             : #define REQ_NOIDLE      (1 << __REQ_NOIDLE)
     145             : #define REQ_IO_STAT     (1 << __REQ_IO_STAT)
     146             : #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
     147             : 
     148             : #define REQ_FAILFAST_MASK       (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
     149             :                                  REQ_FAILFAST_DRIVER)
     150             : 
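
/*
 * Sketch: the masks above are tested against a request's cmd_flags, e.g.
 * to see whether any fail-fast mode was requested. cmd_flags_failfast()
 * is a hypothetical helper.
 */
static inline int cmd_flags_failfast(unsigned int cmd_flags)
{
        return (cmd_flags & REQ_FAILFAST_MASK) != 0;
}
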
     151             : #define BLK_MAX_CDB     16
     152             : 
     153             : /*
     154             :  * try to put the fields that are referenced together in the same cacheline.
     155             :  * if you modify this structure, be sure to check block/blk-core.c:rq_init()
     156             :  * as well!
     157             :  */
     158             : struct request {
     159             :         struct list_head queuelist;
     160             :         struct call_single_data csd;
     161             :         int cpu;
     162             : 
     163             :         struct request_queue *q;
     164             : 
     165             :         unsigned int cmd_flags;
     166             :         enum rq_cmd_type_bits cmd_type;
     167             :         unsigned long atomic_flags;
     168             : 
     169             :         /* the following two fields are internal, NEVER access directly */
     170             :         sector_t __sector;              /* sector cursor */
     171             :         unsigned int __data_len;        /* total data len */
     172             : 
     173             :         struct bio *bio;
     174             :         struct bio *biotail;
     175             : 
     176             :         struct hlist_node hash; /* merge hash */
     177             :         /*
     178             :          * The rb_node is only used inside the io scheduler, requests
     179             :          * are pruned when moved to the dispatch queue. So let the
     180             :          * completion_data share space with the rb_node.
     181             :          */
     182             :         union {
     183             :                 struct rb_node rb_node; /* sort/lookup */
     184             :                 void *completion_data;
     185             :         };
     186             : 
     187             :         /*
     188             :          * two pointers are available for the IO schedulers, if they need
     189             :          * more they have to dynamically allocate it.
     190             :          */
     191             :         void *elevator_private;
     192             :         void *elevator_private2;
     193             : 
     194             :         struct gendisk *rq_disk;
     195             :         unsigned long start_time;
     196             : 
     197             :         /* Number of scatter-gather DMA addr+len pairs after
     198             :          * physical address coalescing is performed.
     199             :          */
     200             :         unsigned short nr_phys_segments;
     201             : 
     202             :         unsigned short ioprio;
     203             : 
     204             :         void *special;          /* opaque pointer available for LLD use */
     205             :         char *buffer;           /* kaddr of the current segment if available */
     206             : 
     207             :         int tag;
     208             :         int errors;
     209             : 
     210             :         int ref_count;
     211             : 
     212             :         /*
     213             :          * when request is used as a packet command carrier
     214             :          */
     215             :         unsigned short cmd_len;
     216             :         unsigned char __cmd[BLK_MAX_CDB];
     217             :         unsigned char *cmd;
     218             : 
     219             :         unsigned int extra_len; /* length of alignment and padding */
     220             :         unsigned int sense_len;
     221             :         unsigned int resid_len; /* residual count */
     222             :         void *sense;
     223             : 
     224             :         unsigned long deadline;
     225             :         struct list_head timeout_list;
     226             :         unsigned int timeout;
     227             :         int retries;
     228             : 
     229             :         /*
     230             :          * completion callback.
     231             :          */
     232             :         rq_end_io_fn *end_io;
     233             :         void *end_io_data;
     234             : 
     235             :         /* for bidi */
     236             :         struct request *next_rq;
     237             : };
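
/*
 * Sketch of the rb_node/completion_data overlay documented above: while a
 * request sits in the io scheduler it can be keyed in an rbtree by sector;
 * once dispatched, the same storage may carry completion data. This mirrors
 * elv_rb_add()-style code; my_sched_rb_add() is hypothetical and relies on
 * <linux/rbtree.h> plus the blk_rq_pos() accessor defined further down.
 */
static void my_sched_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
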
     238             : 
     239             : static inline unsigned short req_get_ioprio(struct request *req)
     240             : {
     241             :         return req->ioprio;
     242             : }
     243             : 
     244             : /*
     245             :  * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
     246             :  * requests. Some step values could eventually be made generic.
     247             :  */
     248             : struct request_pm_state
     249             : {
     250             :         /* PM state machine step value, currently driver specific */
     251             :         int     pm_step;
     252             :         /* requested PM state value (S1, S2, S3, S4, ...) */
     253             :         u32     pm_state;
     254             :         void*   data;           /* for driver use */
     255             : };
     256             : 
     257             : #include <linux/elevator.h>
     258             : 
     259           1 : typedef void (request_fn_proc) (struct request_queue *q);
     260           1 : typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
     261           1 : typedef int (prep_rq_fn) (struct request_queue *, struct request *);
     262           1 : typedef void (unplug_fn) (struct request_queue *);
     263           1 : 
     264             : struct bio_vec;
     265             : struct bvec_merge_data {
     266             :         struct block_device *bi_bdev;
     267             :         sector_t bi_sector;
     268             :         unsigned bi_size;
     269             :         unsigned long bi_rw;
     270             : };
     271           1 : typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
     272             :                              struct bio_vec *);
     273           1 : typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
     274           1 : typedef void (softirq_done_fn)(struct request *);
     275           1 : typedef int (dma_drain_needed_fn)(struct request *);
     276           1 : typedef int (lld_busy_fn) (struct request_queue *q);
     277           1 : 
     278             : enum blk_eh_timer_return {
     279             :         BLK_EH_NOT_HANDLED,
     280             :         BLK_EH_HANDLED,
     281             :         BLK_EH_RESET_TIMER,
     282             : };
     283             : 
     284           1 : typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
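
/*
 * Sketch of a handler matching rq_timed_out_fn, as a driver would register
 * with blk_queue_rq_timed_out() (declared further down). mydrv_timed_out()
 * and its retry budget are hypothetical.
 */
static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
        if (rq->retries++ < 3)
                return BLK_EH_RESET_TIMER;      /* re-arm and keep waiting */

        return BLK_EH_HANDLED;                  /* block layer completes rq */
}
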
     285             : 
     286             : enum blk_queue_state {
     287             :         Queue_down,
     288             :         Queue_up,
     289             : };
     290           1 : 
     291             : struct blk_queue_tag {
     292             :         struct request **tag_index;     /* map of busy tags */
     293             :         unsigned long *tag_map;         /* bit map of free/busy tags */
     294             :         int busy;                       /* current depth */
     295             :         int max_depth;                  /* what we will send to device */
     296             :         int real_max_depth;             /* what the array can hold */
     297             :         atomic_t refcnt;                /* map can be shared */
     298             : };
     299           1 : 
     300             : #define BLK_SCSI_MAX_CMDS       (256)
     301             : #define BLK_SCSI_CMD_PER_LONG   (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
     302             : 
     303             : struct queue_limits {
     304             :         unsigned long           bounce_pfn;
     305             :         unsigned long           seg_boundary_mask;
     306             : 
     307             :         unsigned int            max_hw_sectors;
     308             :         unsigned int            max_sectors;
     309             :         unsigned int            max_segment_size;
     310             :         unsigned int            physical_block_size;
     311             :         unsigned int            alignment_offset;
     312             :         unsigned int            io_min;
     313             :         unsigned int            io_opt;
     314             :         unsigned int            max_discard_sectors;
     315             :         unsigned int            discard_granularity;
     316             :         unsigned int            discard_alignment;
     317             : 
     318             :         unsigned short          logical_block_size;
     319             :         unsigned short          max_hw_segments;
     320             :         unsigned short          max_phys_segments;
     321             : 
     322             :         unsigned char           misaligned;
     323             :         unsigned char           discard_misaligned;
     324             :         unsigned char           no_cluster;
     325             :         signed char             discard_zeroes_data;
     326             : };
     327           1 : 
     328             : struct request_queue
     329             : {
     330             :         /*
     331             :          * Together with queue_head for cacheline sharing
     332             :          */
     333             :         struct list_head        queue_head;
     334             :         struct request          *last_merge;
     335             :         struct elevator_queue   *elevator;
     336             : 
     337             :         /*
     338             :          * the queue request freelist, one for reads and one for writes
     339             :          */
     340             :         struct request_list     rq;
     341             : 
     342             :         request_fn_proc         *request_fn;
     343             :         make_request_fn         *make_request_fn;
     344             :         prep_rq_fn              *prep_rq_fn;
     345             :         unplug_fn               *unplug_fn;
     346             :         merge_bvec_fn           *merge_bvec_fn;
     347             :         prepare_flush_fn        *prepare_flush_fn;
     348             :         softirq_done_fn         *softirq_done_fn;
     349             :         rq_timed_out_fn         *rq_timed_out_fn;
     350             :         dma_drain_needed_fn     *dma_drain_needed;
     351             :         lld_busy_fn             *lld_busy_fn;
     352             : 
     353             :         /*
     354             :          * Dispatch queue sorting
     355             :          */
     356             :         sector_t                end_sector;
     357             :         struct request          *boundary_rq;
     358             : 
     359             :         /*
     360             :          * Auto-unplugging state
     361             :          */
     362             :         struct timer_list       unplug_timer;
     363             :         int                     unplug_thresh;  /* After this many requests */
     364             :         unsigned long           unplug_delay;   /* After this many jiffies */
     365             :         struct work_struct      unplug_work;
     366             : 
     367             :         struct backing_dev_info backing_dev_info;
     368             : 
     369             :         /*
     370             :          * The queue owner gets to use this for whatever they like.
     371             :          * ll_rw_blk doesn't touch it.
     372             :          */
     373             :         void                    *queuedata;
     374             : 
     375             :         /*
     376             :          * queue needs bounce pages for pages above this limit
     377             :          */
     378             :         gfp_t                   bounce_gfp;
     379             : 
     380             :         /*
     381             :          * various queue flags, see QUEUE_* below
     382             :          */
     383             :         unsigned long           queue_flags;
     384             : 
     385             :         /*
     386             :          * protects queue structures from reentrancy. ->__queue_lock should
     387             :          * _never_ be used directly, it is queue private. always use
     388             :          * ->queue_lock.
     389             :          */
     390             :         spinlock_t              __queue_lock;
     391             :         spinlock_t              *queue_lock;
     392             : 
     393             :         /*
     394             :          * queue kobject
     395             :          */
     396             :         struct kobject kobj;
     397             : 
     398             :         /*
     399             :          * queue settings
     400             :          */
     401             :         unsigned long           nr_requests;    /* Max # of requests */
     402             :         unsigned int            nr_congestion_on;
     403             :         unsigned int            nr_congestion_off;
     404             :         unsigned int            nr_batching;
     405             : 
     406             :         void                    *dma_drain_buffer;
     407             :         unsigned int            dma_drain_size;
     408             :         unsigned int            dma_pad_mask;
     409             :         unsigned int            dma_alignment;
     410             : 
     411             :         struct blk_queue_tag    *queue_tags;
     412             :         struct list_head        tag_busy_list;
     413             : 
     414             :         unsigned int            nr_sorted;
     415             :         unsigned int            in_flight[2];
     416             : 
     417             :         unsigned int            rq_timeout;
     418             :         struct timer_list       timeout;
     419             :         struct list_head        timeout_list;
     420             : 
     421             :         struct queue_limits     limits;
     422             : 
     423             :         /*
     424             :          * sg stuff
     425             :          */
     426             :         unsigned int            sg_timeout;
     427             :         unsigned int            sg_reserved_size;
     428             :         int                     node;
     429             : #ifdef CONFIG_BLK_DEV_IO_TRACE
     430             :         struct blk_trace        *blk_trace;
     431             : #endif
     432             :         /*
     433             :          * reserved for flush operations
     434             :          */
     435             :         unsigned int            ordered, next_ordered, ordseq;
     436             :         int                     orderr, ordcolor;
     437             :         struct request          pre_flush_rq, bar_rq, post_flush_rq;
     438             :         struct request          *orig_bar_rq;
     439             : 
     440             :         struct mutex            sysfs_lock;
     441             : 
     442             : #if defined(CONFIG_BLK_DEV_BSG)
     443             :         struct bsg_class_device bsg_dev;
     444             : #endif
     445             : };
     446             : 
     447             : #define QUEUE_FLAG_CLUSTER      0       /* cluster several segments into 1 */
     448             : #define QUEUE_FLAG_QUEUED       1       /* uses generic tag queueing */
     449             : #define QUEUE_FLAG_STOPPED      2       /* queue is stopped */
     450             : #define QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
     451             : #define QUEUE_FLAG_ASYNCFULL    4       /* write queue has been filled */
     452             : #define QUEUE_FLAG_DEAD         5       /* queue being torn down */
     453             : #define QUEUE_FLAG_REENTER      6       /* Re-entrancy avoidance */
     454             : #define QUEUE_FLAG_PLUGGED      7       /* queue is plugged */
     455             : #define QUEUE_FLAG_ELVSWITCH    8       /* don't use elevator, just do FIFO */
     456             : #define QUEUE_FLAG_BIDI         9       /* queue supports bidi requests */
     457             : #define QUEUE_FLAG_NOMERGES    10       /* disable merge attempts */
     458             : #define QUEUE_FLAG_SAME_COMP   11       /* force complete on same CPU */
     459             : #define QUEUE_FLAG_FAIL_IO     12       /* fake timeout */
     460             : #define QUEUE_FLAG_STACKABLE   13       /* supports request stacking */
     461             : #define QUEUE_FLAG_NONROT      14       /* non-rotational device (SSD) */
     462             : #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
     463             : #define QUEUE_FLAG_IO_STAT     15       /* do IO stats */
     464             : #define QUEUE_FLAG_DISCARD     16       /* supports DISCARD */
     465             : 
     466             : #define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |              \
     467             :                                  (1 << QUEUE_FLAG_CLUSTER) |              \
     468             :                                  (1 << QUEUE_FLAG_STACKABLE)      |       \
     469             :                                  (1 << QUEUE_FLAG_SAME_COMP))
     470             : 
     471             : static inline int queue_is_locked(struct request_queue *q)
     472             : {
     473             : #ifdef CONFIG_SMP
     474             :         spinlock_t *lock = q->queue_lock;
     475             :         return lock && spin_is_locked(lock);
     476             : #else
     477             :         return 1;
     478             : #endif
     479             : }
     480             : 
     481             : static inline void queue_flag_set_unlocked(unsigned int flag,
     482             :                                            struct request_queue *q)
     483             : {
     484             :         __set_bit(flag, &q->queue_flags);
     485             : }
     486             : 
     487             : static inline int queue_flag_test_and_clear(unsigned int flag,
     488             :                                             struct request_queue *q)
     489             : {
     490             :         WARN_ON_ONCE(!queue_is_locked(q));
     491             : 
     492             :         if (test_bit(flag, &q->queue_flags)) {
     493             :                 __clear_bit(flag, &q->queue_flags);
     494             :                 return 1;
     495             :         }
     496             : 
     497             :         return 0;
     498             : }
     499             : 
     500             : static inline int queue_flag_test_and_set(unsigned int flag,
     501             :                                           struct request_queue *q)
     502             : {
     503             :         WARN_ON_ONCE(!queue_is_locked(q));
     504             : 
     505             :         if (!test_bit(flag, &q->queue_flags)) {
     506             :                 __set_bit(flag, &q->queue_flags);
     507             :                 return 0;
     508             :         }
     509             : 
     510             :         return 1;
     511             : }
     512             : 
     513             : static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
     514             : {
     515             :         WARN_ON_ONCE(!queue_is_locked(q));
     516             :         __set_bit(flag, &q->queue_flags);
     517             : }
     518             : 
     519             : static inline void queue_flag_clear_unlocked(unsigned int flag,
     520             :                                              struct request_queue *q)
     521             : {
     522             :         __clear_bit(flag, &q->queue_flags);
     523             : }
     524             : 
     525             : static inline int queue_in_flight(struct request_queue *q)
     526             : {
     527             :         return q->in_flight[0] + q->in_flight[1];
     528             : }
     529             : 
     530             : static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
     531             : {
     532             :         WARN_ON_ONCE(!queue_is_locked(q));
     533             :         __clear_bit(flag, &q->queue_flags);
     534             : }
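
/*
 * Sketch: the locked helpers above expect q->queue_lock to be held, which
 * is already the case inside a driver's request_fn. mydrv_stop_queue() and
 * mydrv_busy are hypothetical.
 */
static int mydrv_busy;

static void mydrv_stop_queue(struct request_queue *q)
{
        /* called with q->queue_lock held, e.g. from the request_fn */
        if (mydrv_busy)
                queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
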
     535             : 
     536             : enum {
     537             :         /*
     538             :          * Hardbarrier is supported with one of the following methods.
     539             :          *
     540             :          * NONE         : hardbarrier unsupported
     541             :          * DRAIN        : ordering by draining is enough
     542             :          * DRAIN_FLUSH  : ordering by draining w/ pre and post flushes
     543             :          * DRAIN_FUA    : ordering by draining w/ pre flush and FUA write
     544             :          * TAG          : ordering by tag is enough
     545             :          * TAG_FLUSH    : ordering by tag w/ pre and post flushes
     546             :          * TAG_FUA      : ordering by tag w/ pre flush and FUA write
     547             :          */
     548             :         QUEUE_ORDERED_BY_DRAIN          = 0x01,
     549             :         QUEUE_ORDERED_BY_TAG            = 0x02,
     550             :         QUEUE_ORDERED_DO_PREFLUSH       = 0x10,
     551             :         QUEUE_ORDERED_DO_BAR            = 0x20,
     552             :         QUEUE_ORDERED_DO_POSTFLUSH      = 0x40,
     553             :         QUEUE_ORDERED_DO_FUA            = 0x80,
     554             : 
     555             :         QUEUE_ORDERED_NONE              = 0x00,
     556             : 
     557             :         QUEUE_ORDERED_DRAIN             = QUEUE_ORDERED_BY_DRAIN |
     558             :                                           QUEUE_ORDERED_DO_BAR,
     559             :         QUEUE_ORDERED_DRAIN_FLUSH       = QUEUE_ORDERED_DRAIN |
     560             :                                           QUEUE_ORDERED_DO_PREFLUSH |
     561             :                                           QUEUE_ORDERED_DO_POSTFLUSH,
     562             :         QUEUE_ORDERED_DRAIN_FUA         = QUEUE_ORDERED_DRAIN |
     563             :                                           QUEUE_ORDERED_DO_PREFLUSH |
     564             :                                           QUEUE_ORDERED_DO_FUA,
     565             : 
     566             :         QUEUE_ORDERED_TAG               = QUEUE_ORDERED_BY_TAG |
     567             :                                           QUEUE_ORDERED_DO_BAR,
     568             :         QUEUE_ORDERED_TAG_FLUSH         = QUEUE_ORDERED_TAG |
     569             :                                           QUEUE_ORDERED_DO_PREFLUSH |
     570             :                                           QUEUE_ORDERED_DO_POSTFLUSH,
     571             :         QUEUE_ORDERED_TAG_FUA           = QUEUE_ORDERED_TAG |
     572             :                                           QUEUE_ORDERED_DO_PREFLUSH |
     573             :                                           QUEUE_ORDERED_DO_FUA,
     574             : 
     575             :         /*
     576             :          * Ordered operation sequence
     577             :          */
     578             :         QUEUE_ORDSEQ_STARTED    = 0x01, /* flushing in progress */
     579             :         QUEUE_ORDSEQ_DRAIN      = 0x02, /* waiting for the queue to be drained */
     580             :         QUEUE_ORDSEQ_PREFLUSH   = 0x04, /* pre-flushing in progress */
     581             :         QUEUE_ORDSEQ_BAR        = 0x08, /* original barrier req in progress */
     582             :         QUEUE_ORDSEQ_POSTFLUSH  = 0x10, /* post-flushing in progress */
     583             :         QUEUE_ORDSEQ_DONE       = 0x20,
     584             : };
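
/*
 * Sketch: a driver with a volatile write cache and no FUA support would
 * register the DRAIN_FLUSH method and supply a prepare_flush_fn that turns
 * the pre/post flush request into a cache-flush command. mydrv_prepare_flush()
 * is hypothetical; blk_queue_ordered() is declared further down.
 */
static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
        rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
        rq->cmd[0] = REQ_LB_OP_FLUSH;
}

/* at probe time:
 *      blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
 */
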
     585             : 
     586             : #define blk_queue_plugged(q)    test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
     587             : #define blk_queue_tagged(q)     test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
     588             : #define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
     589             : #define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
     590             : #define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
     591             : #define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
     592             : #define blk_queue_flushing(q)   ((q)->ordseq)
     593             : #define blk_queue_stackable(q)  \
     594             :         test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
     595             : #define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
     596             : 
     597             : #define blk_fs_request(rq)      ((rq)->cmd_type == REQ_TYPE_FS)
     598             : #define blk_pc_request(rq)      ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
     599             : #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
     600             : #define blk_sense_request(rq)   ((rq)->cmd_type == REQ_TYPE_SENSE)
     601             : 
     602             : #define blk_failfast_dev(rq)    ((rq)->cmd_flags & REQ_FAILFAST_DEV)
     603             : #define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
     604             : #define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
     605             : #define blk_noretry_request(rq) (blk_failfast_dev(rq) ||        \
     606             :                                  blk_failfast_transport(rq) ||  \
     607             :                                  blk_failfast_driver(rq))
     608             : #define blk_rq_started(rq)      ((rq)->cmd_flags & REQ_STARTED)
     609             : #define blk_rq_io_stat(rq)      ((rq)->cmd_flags & REQ_IO_STAT)
     610             : #define blk_rq_quiet(rq)        ((rq)->cmd_flags & REQ_QUIET)
     611             : 
     612             : #define blk_account_rq(rq)      (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 
     613             : 
     614             : #define blk_pm_suspend_request(rq)      ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
     615             : #define blk_pm_resume_request(rq)       ((rq)->cmd_type == REQ_TYPE_PM_RESUME)
     616             : #define blk_pm_request(rq)      \
     617             :         (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
     618             : 
     619             : #define blk_rq_cpu_valid(rq)    ((rq)->cpu != -1)
     620             : #define blk_sorted_rq(rq)       ((rq)->cmd_flags & REQ_SORTED)
     621             : #define blk_barrier_rq(rq)      ((rq)->cmd_flags & REQ_HARDBARRIER)
     622             : #define blk_fua_rq(rq)          ((rq)->cmd_flags & REQ_FUA)
     623             : #define blk_discard_rq(rq)      ((rq)->cmd_flags & REQ_DISCARD)
     624             : #define blk_bidi_rq(rq)         ((rq)->next_rq != NULL)
     625             : /* rq->queuelist of dequeued request must be list_empty() */
     626             : #define blk_queued_rq(rq)       (!list_empty(&(rq)->queuelist))
     627             : 
     628             : #define list_entry_rq(ptr)      list_entry((ptr), struct request, queuelist)
     629             : 
     630             : #define rq_data_dir(rq)         ((rq)->cmd_flags & 1)
     631             : 
     632             : /*
      633             :  * We regard a request as sync if it is either a read or a sync write.
     634             :  */
     635             : static inline bool rw_is_sync(unsigned int rw_flags)
     636             : {
     637             :         return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
     638             : }
     639             : 
     640             : static inline bool rq_is_sync(struct request *rq)
     641             : {
     642             :         return rw_is_sync(rq->cmd_flags);
     643             : }
     644             : 
     645             : #define rq_is_meta(rq)          ((rq)->cmd_flags & REQ_RW_META)
     646             : #define rq_noidle(rq)           ((rq)->cmd_flags & REQ_NOIDLE)
     647             : 
     648             : static inline int blk_queue_full(struct request_queue *q, int sync)
     649             : {
     650             :         if (sync)
     651             :                 return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
     652             :         return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
     653             : }
     654             : 
     655             : static inline void blk_set_queue_full(struct request_queue *q, int sync)
     656             : {
     657             :         if (sync)
     658             :                 queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
     659             :         else
     660             :                 queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
     661             : }
     662             : 
     663             : static inline void blk_clear_queue_full(struct request_queue *q, int sync)
     664             : {
     665             :         if (sync)
     666             :                 queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
     667             :         else
     668             :                 queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
     669             : }
     670             : 
     671             : 
     672             : /*
      673             :  * A mergeable request must not have the _NOMERGE or _BARRIER bit set, nor
      674             :  * may it already have been started by the driver.
     675             :  */
     676             : #define RQ_NOMERGE_FLAGS        \
     677             :         (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
     678             : #define rq_mergeable(rq)        \
     679             :         (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
     680             :          (blk_discard_rq(rq) || blk_fs_request((rq))))
     681             : 
     682             : /*
     683             :  * q->prep_rq_fn return values
     684             :  */
     685             : #define BLKPREP_OK              0       /* serve it */
     686             : #define BLKPREP_KILL            1       /* fatal error, kill */
     687             : #define BLKPREP_DEFER           2       /* leave on queue */
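
/*
 * Sketch of a prep_rq_fn built on the return codes above; it would be wired
 * up with blk_queue_prep_rq() (declared further down). mydrv_prep() and
 * mydrv_short_on_resources() are hypothetical.
 */
static int mydrv_short_on_resources(struct request_queue *q);

static int mydrv_prep(struct request_queue *q, struct request *rq)
{
        if (!blk_fs_request(rq))
                return BLKPREP_KILL;    /* we only serve fs requests */

        if (mydrv_short_on_resources(q))
                return BLKPREP_DEFER;   /* leave it on the queue, retry later */

        rq->cmd_flags |= REQ_DONTPREP;  /* skip prep if the request requeues */
        return BLKPREP_OK;
}
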
     688             : 
     689             : extern unsigned long blk_max_low_pfn, blk_max_pfn;
     690             : 
     691             : /*
     692             :  * standard bounce addresses:
     693             :  *
     694             :  * BLK_BOUNCE_HIGH      : bounce all highmem pages
     695             :  * BLK_BOUNCE_ANY       : don't bounce anything
     696             :  * BLK_BOUNCE_ISA       : bounce pages above ISA DMA boundary
     697             :  */
     698             : 
     699             : #if BITS_PER_LONG == 32
     700             : #define BLK_BOUNCE_HIGH         ((u64)blk_max_low_pfn << PAGE_SHIFT)
     701             : #else
     702             : #define BLK_BOUNCE_HIGH         -1ULL
     703             : #endif
     704             : #define BLK_BOUNCE_ANY          (-1ULL)
     705             : #define BLK_BOUNCE_ISA          (ISA_DMA_THRESHOLD)
     706             : 
     707             : /*
     708             :  * default timeout for SG_IO if none specified
     709             :  */
     710             : #define BLK_DEFAULT_SG_TIMEOUT  (60 * HZ)
     711             : #define BLK_MIN_SG_TIMEOUT      (7 * HZ)
     712             : 
     713             : #ifdef CONFIG_BOUNCE
     714             : extern int init_emergency_isa_pool(void);
     715             : extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
     716             : #else
     717             : static inline int init_emergency_isa_pool(void)
     718             : {
     719             :         return 0;
     720             : }
     721             : static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
     722             : {
     723             : }
      724             : #endif /* CONFIG_BOUNCE */
     725             : 
     726             : struct rq_map_data {
     727             :         struct page **pages;
     728             :         int page_order;
     729             :         int nr_entries;
     730             :         unsigned long offset;
     731             :         int null_mapped;
     732             :         int from_user;
     733             : };
     734             : 
     735             : struct req_iterator {
     736             :         int i;
     737             :         struct bio *bio;
     738             : };
     739             : 
     740             : /* This should not be used directly - use rq_for_each_segment */
     741             : #define for_each_bio(_bio)              \
     742             :         for (; _bio; _bio = _bio->bi_next)
     743             : #define __rq_for_each_bio(_bio, rq)     \
     744             :         if ((rq->bio))                       \
     745             :                 for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
     746             : 
     747             : #define rq_for_each_segment(bvl, _rq, _iter)                    \
     748             :         __rq_for_each_bio(_iter.bio, _rq)                       \
     749             :                 bio_for_each_segment(bvl, _iter.bio, _iter.i)
     750             : 
     751             : #define rq_iter_last(rq, _iter)                                 \
     752             :                 (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
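
/*
 * Sketch: walking every segment of a request with rq_for_each_segment().
 * In this kernel generation the iteration variable is a struct bio_vec
 * pointer. mydrv_copy_segments() is hypothetical; kmap()/kunmap() come
 * from <linux/highmem.h>.
 */
static void mydrv_copy_segments(struct request *rq)
{
        struct req_iterator iter;
        struct bio_vec *bvec;

        rq_for_each_segment(bvec, rq, iter) {
                void *buf = kmap(bvec->bv_page) + bvec->bv_offset;

                /* ... transfer bvec->bv_len bytes to or from buf ... */
                kunmap(bvec->bv_page);
        }
}
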
     753             : 
     754             : #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
     755             : # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
     756             : #endif
     757             : #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
     758             : extern void rq_flush_dcache_pages(struct request *rq);
     759             : #else
     760             : static inline void rq_flush_dcache_pages(struct request *rq)
     761             : {
     762             : }
     763             : #endif
     764             : 
     765             : extern int blk_register_queue(struct gendisk *disk);
     766             : extern void blk_unregister_queue(struct gendisk *disk);
     767             : extern void register_disk(struct gendisk *dev);
     768             : extern void generic_make_request(struct bio *bio);
     769             : extern void blk_rq_init(struct request_queue *q, struct request *rq);
     770             : extern void blk_put_request(struct request *);
     771             : extern void __blk_put_request(struct request_queue *, struct request *);
     772             : extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
     773             : extern struct request *blk_make_request(struct request_queue *, struct bio *,
     774             :                                         gfp_t);
     775             : extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
     776             : extern void blk_requeue_request(struct request_queue *, struct request *);
     777             : extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
     778             : extern int blk_lld_busy(struct request_queue *q);
     779             : extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
     780             :                              struct bio_set *bs, gfp_t gfp_mask,
     781             :                              int (*bio_ctr)(struct bio *, struct bio *, void *),
     782             :                              void *data);
     783             : extern void blk_rq_unprep_clone(struct request *rq);
     784             : extern int blk_insert_cloned_request(struct request_queue *q,
     785             :                                      struct request *rq);
     786             : extern void blk_plug_device(struct request_queue *);
     787             : extern void blk_plug_device_unlocked(struct request_queue *);
     788             : extern int blk_remove_plug(struct request_queue *);
     789             : extern void blk_recount_segments(struct request_queue *, struct bio *);
     790             : extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
     791             :                           unsigned int, void __user *);
     792             : extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
     793             :                          struct scsi_ioctl_command __user *);
     794             : 
     795             : /*
      796             :  * A queue has just exited congestion.  Note this in the global counter of
     797             :  * congested queues, and wake up anyone who was waiting for requests to be
     798             :  * put back.
     799             :  */
     800             : static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
     801             : {
     802             :         clear_bdi_congested(&q->backing_dev_info, sync);
     803             : }
     804             : 
     805             : /*
     806             :  * A queue has just entered congestion.  Flag that in the queue's VM-visible
      807             :  * state flags and increment the global counter of congested queues.
     808             :  */
     809             : static inline void blk_set_queue_congested(struct request_queue *q, int sync)
     810             : {
     811             :         set_bdi_congested(&q->backing_dev_info, sync);
     812             : }
     813             : 
     814             : extern void blk_start_queue(struct request_queue *q);
     815             : extern void blk_stop_queue(struct request_queue *q);
     816             : extern void blk_sync_queue(struct request_queue *q);
     817             : extern void __blk_stop_queue(struct request_queue *q);
     818             : extern void __blk_run_queue(struct request_queue *);
     819             : extern void blk_run_queue(struct request_queue *);
     820             : extern int blk_rq_map_user(struct request_queue *, struct request *,
     821             :                            struct rq_map_data *, void __user *, unsigned long,
     822             :                            gfp_t);
     823             : extern int blk_rq_unmap_user(struct bio *);
     824             : extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
     825             : extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
     826             :                                struct rq_map_data *, struct sg_iovec *, int,
     827             :                                unsigned int, gfp_t);
     828             : extern int blk_execute_rq(struct request_queue *, struct gendisk *,
     829             :                           struct request *, int);
     830             : extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
     831             :                                   struct request *, int, rq_end_io_fn *);
     832             : extern void blk_unplug(struct request_queue *q);
     833             : 
     834             : static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
     835             : {
     836             :         return bdev->bd_disk->queue;
     837             : }
     838             : 
     839             : /*
     840             :  * blk_rq_pos()                 : the current sector
     841             :  * blk_rq_bytes()               : bytes left in the entire request
     842             :  * blk_rq_cur_bytes()           : bytes left in the current segment
     843             :  * blk_rq_err_bytes()           : bytes left till the next error boundary
     844             :  * blk_rq_sectors()             : sectors left in the entire request
     845             :  * blk_rq_cur_sectors()         : sectors left in the current segment
     846             :  */
     847             : static inline sector_t blk_rq_pos(const struct request *rq)
     848             : {
     849             :         return rq->__sector;
     850             : }
     851             : 
     852             : static inline unsigned int blk_rq_bytes(const struct request *rq)
     853             : {
     854             :         return rq->__data_len;
     855             : }
     856             : 
     857             : static inline int blk_rq_cur_bytes(const struct request *rq)
     858             : {
     859             :         return rq->bio ? bio_cur_bytes(rq->bio) : 0;
     860             : }
     861             : 
     862             : extern unsigned int blk_rq_err_bytes(const struct request *rq);
     863             : 
     864             : static inline unsigned int blk_rq_sectors(const struct request *rq)
     865             : {
     866             :         return blk_rq_bytes(rq) >> 9;
     867             : }
     868             : 
     869             : static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
     870             : {
     871             :         return blk_rq_cur_bytes(rq) >> 9;
     872             : }
     873             : 
     874             : /*
     875             :  * Request issue related functions.
     876             :  */
     877             : extern struct request *blk_peek_request(struct request_queue *q);
     878             : extern void blk_start_request(struct request *rq);
     879             : extern struct request *blk_fetch_request(struct request_queue *q);
     880             : 
     881             : /*
     882             :  * Request completion related functions.
     883             :  *
     884             :  * blk_update_request() completes given number of bytes and updates
     885             :  * the request without completing it.
     886             :  *
     887             :  * blk_end_request() and friends.  __blk_end_request() must be called
     888             :  * with the request queue spinlock acquired.
     889             :  *
     890             :  * Several drivers define their own end_request and call
     891             :  * blk_end_request() for parts of the original function.
     892             :  * This prevents code duplication in drivers.
     893             :  */
     894             : extern bool blk_update_request(struct request *rq, int error,
     895             :                                unsigned int nr_bytes);
     896             : extern bool blk_end_request(struct request *rq, int error,
     897             :                             unsigned int nr_bytes);
     898             : extern void blk_end_request_all(struct request *rq, int error);
     899             : extern bool blk_end_request_cur(struct request *rq, int error);
     900             : extern bool blk_end_request_err(struct request *rq, int error);
     901             : extern bool __blk_end_request(struct request *rq, int error,
     902             :                               unsigned int nr_bytes);
     903             : extern void __blk_end_request_all(struct request *rq, int error);
     904             : extern bool __blk_end_request_cur(struct request *rq, int error);
     905             : extern bool __blk_end_request_err(struct request *rq, int error);
     906             : 
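
/*
 * Sketch of the canonical fetch/complete loop combining blk_fetch_request()
 * with __blk_end_request_cur() from above; a request_fn runs with
 * q->queue_lock held, so the locked __ variants apply. mydrv_xfer() is a
 * hypothetical transfer routine returning 0 or a negative errno.
 */
static int mydrv_xfer(sector_t pos, char *buf, unsigned int nsect, int write);

static void mydrv_request_fn(struct request_queue *q)
{
        struct request *rq = blk_fetch_request(q);

        while (rq) {
                int err = -EIO;

                if (blk_fs_request(rq))
                        err = mydrv_xfer(blk_rq_pos(rq), rq->buffer,
                                         blk_rq_cur_sectors(rq),
                                         rq_data_dir(rq));

                /* returns false once the whole request has been completed */
                if (!__blk_end_request_cur(rq, err))
                        rq = blk_fetch_request(q);
        }
}
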
     907             : extern void blk_complete_request(struct request *);
     908             : extern void __blk_complete_request(struct request *);
     909             : extern void blk_abort_request(struct request *);
     910             : extern void blk_abort_queue(struct request_queue *);
     911             : 
     912             : /*
     913             :  * Access functions for manipulating queue properties
     914             :  */
     915             : extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
     916             :                                         spinlock_t *lock, int node_id);
     917             : extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
     918             : extern void blk_cleanup_queue(struct request_queue *);
     919             : extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
     920             : extern void blk_queue_bounce_limit(struct request_queue *, u64);
     921             : extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
     922             : extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
     923             : extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
     924             : extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
     925             : extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
     926             : extern void blk_queue_max_discard_sectors(struct request_queue *q,
     927             :                 unsigned int max_discard_sectors);
     928             : extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
     929             : extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
     930             : extern void blk_queue_alignment_offset(struct request_queue *q,
     931             :                                        unsigned int alignment);
     932             : extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
     933             : extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
     934             : extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
     935             : extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
     936             : extern void blk_set_default_limits(struct queue_limits *lim);
     937             : extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
     938             :                             sector_t offset);
     939             : extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
     940             :                             sector_t offset);
     941             : extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
     942             :                               sector_t offset);
     943             : extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
     944             : extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
     945             : extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
     946             : extern int blk_queue_dma_drain(struct request_queue *q,
     947             :                                dma_drain_needed_fn *dma_drain_needed,
     948             :                                void *buf, unsigned int size);
     949             : extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
     950             : extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
     951             : extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
     952             : extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
     953             : extern void blk_queue_dma_alignment(struct request_queue *, int);
     954             : extern void blk_queue_update_dma_alignment(struct request_queue *, int);
     955             : extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
     956             : extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
     957             : extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
     958             : extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
     959             : extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
     960             : extern bool blk_do_ordered(struct request_queue *, struct request **);
     961             : extern unsigned blk_ordered_cur_seq(struct request_queue *);
     962             : extern unsigned blk_ordered_req_seq(struct request *);
     963             : extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
     964             : 
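These setters are normally called once at probe time, after the queue is
created and before the disk is registered. A sketch under assumptions
(mydrv_request_fn and the chosen limits are hypothetical; the default
constants reused here are defined further below in this header):

        static DEFINE_SPINLOCK(mydrv_lock);

        static void mydrv_request_fn(struct request_queue *q);  /* hypothetical */

        static struct request_queue *mydrv_setup_queue(void)
        {
                struct request_queue *q;

                q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
                if (!q)
                        return NULL;

                blk_queue_logical_block_size(q, 512);
                blk_queue_max_sectors(q, BLK_DEF_MAX_SECTORS);
                blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
                blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
                blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
                return q;
        }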
     965             : extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
     966             : extern void blk_dump_rq_flags(struct request *, char *);
     967             : extern void generic_unplug_device(struct request_queue *);
     968             : extern long nr_blockdev_pages(void);
     969             : 
     970             : int blk_get_queue(struct request_queue *);
     971             : struct request_queue *blk_alloc_queue(gfp_t);
     972             : struct request_queue *blk_alloc_queue_node(gfp_t, int);
     973             : extern void blk_put_queue(struct request_queue *);
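Bio-based drivers skip the request-queue machinery entirely: they allocate a
bare queue with blk_alloc_queue() and install their own handler with
blk_queue_make_request() (declared above). Sketch, with mydrv_make_request
hypothetical:

        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                return -ENOMEM;
        blk_queue_make_request(q, mydrv_make_request);

blk_get_queue() and blk_put_queue() adjust the queue's reference count; a
driver that hands the queue to another subsystem takes a reference and drops
it when done.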
     974             : 
     975             : /*
     976             :  * tag stuff
     977             :  */
     978             : #define blk_rq_tagged(rq)               ((rq)->cmd_flags & REQ_QUEUED)
     979             : extern int blk_queue_start_tag(struct request_queue *, struct request *);
     980             : extern struct request *blk_queue_find_tag(struct request_queue *, int);
     981             : extern void blk_queue_end_tag(struct request_queue *, struct request *);
     982             : extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
     983             : extern void blk_queue_free_tags(struct request_queue *);
     984             : extern int blk_queue_resize_tags(struct request_queue *, int);
     985             : extern void blk_queue_invalidate_tags(struct request_queue *);
     986             : extern struct blk_queue_tag *blk_init_tags(int);
     987             : extern void blk_free_tags(struct blk_queue_tag *);
     988             : 
     989             : static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
     990             :                                                 int tag)
     991             : {
     992             :         if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
     993             :                 return NULL;
     994             :         return bqt->tag_index[tag];
     995             : }
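A sketch of the tagged-command-queuing life cycle, assuming a hypothetical
depth of 64; both blk_queue_start_tag() and blk_queue_end_tag() expect the
queue lock to be held:

        /* probe: allocate a tag map for up to 64 outstanding commands */
        if (blk_queue_init_tags(q, 64, NULL))
                goto fail;

        /* request_fn: assign a tag; nonzero means no tag is free yet */
        if (blk_queue_start_tag(q, rq))
                return;                 /* hold off until a tag is released */

        /* interrupt path: map a hardware-reported tag back to its request */
        rq = blk_queue_find_tag(q, tag);

        /* completion: release the tag for reuse */
        blk_queue_end_tag(q, rq);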
     996             : 
     997             : extern int blkdev_issue_flush(struct block_device *, sector_t *);
     998             : #define DISCARD_FL_WAIT         0x01    /* wait for completion */
     999             : #define DISCARD_FL_BARRIER      0x02    /* issue DISCARD_BARRIER request */
    1000             : extern int blkdev_issue_discard(struct block_device *, sector_t sector,
    1001             :                 sector_t nr_sects, gfp_t, int flags);
    1002             : 
    1003             : static inline int sb_issue_discard(struct super_block *sb,
    1004             :                                    sector_t block, sector_t nr_blocks)
    1005             : {
    1006             :         block <<= (sb->s_blocksize_bits - 9);
    1007             :         nr_blocks <<= (sb->s_blocksize_bits - 9);
    1008             :         return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL,
    1009             :                                     DISCARD_FL_BARRIER);
    1010             : }
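The shift by s_blocksize_bits - 9 converts filesystem blocks into the
512-byte sectors that blkdev_issue_discard() expects. For example, with
4 KiB blocks s_blocksize_bits is 12, so both values are shifted left by 3:

        block 100, 7 blocks  ->  sector 100 << 3 = 800, length 7 << 3 = 56 sectors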
    1011             : 
    1012             : extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
    1013             : 
    1014             : #define MAX_PHYS_SEGMENTS 128
    1015             : #define MAX_HW_SEGMENTS 128
    1016             : #define SAFE_MAX_SECTORS 255
    1017             : #define BLK_DEF_MAX_SECTORS 1024
    1018             : 
    1019             : #define MAX_SEGMENT_SIZE        65536
    1020             : 
    1021             : #define BLK_SEG_BOUNDARY_MASK   0xFFFFFFFFUL
    1022             : 
    1023             : #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
    1024             : 
    1025             : static inline unsigned long queue_bounce_pfn(struct request_queue *q)
    1026             : {
    1027             :         return q->limits.bounce_pfn;
    1028             : }
    1029             : 
    1030             : static inline unsigned long queue_segment_boundary(struct request_queue *q)
    1031             : {
    1032             :         return q->limits.seg_boundary_mask;
    1033             : }
    1034             : 
    1035             : static inline unsigned int queue_max_sectors(struct request_queue *q)
    1036             : {
    1037             :         return q->limits.max_sectors;
    1038             : }
    1039             : 
    1040             : static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
    1041             : {
    1042             :         return q->limits.max_hw_sectors;
    1043             : }
    1044             : 
    1045             : static inline unsigned short queue_max_hw_segments(struct request_queue *q)
    1046             : {
    1047             :         return q->limits.max_hw_segments;
    1048             : }
    1049             : 
    1050             : static inline unsigned short queue_max_phys_segments(struct request_queue *q)
    1051             : {
    1052             :         return q->limits.max_phys_segments;
    1053             : }
    1054             : 
    1055             : static inline unsigned int queue_max_segment_size(struct request_queue *q)
    1056             : {
    1057             :         return q->limits.max_segment_size;
    1058             : }
    1059             : 
    1060             : static inline unsigned short queue_logical_block_size(struct request_queue *q)
    1061             : {
    1062             :         int retval = 512;
    1063             : 
    1064             :         if (q && q->limits.logical_block_size)
    1065             :                 retval = q->limits.logical_block_size;
    1066             : 
    1067             :         return retval;
    1068             : }
    1069             : 
    1070             : static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
    1071             : {
    1072             :         return queue_logical_block_size(bdev_get_queue(bdev));
    1073             : }
    1074             : 
    1075             : static inline unsigned int queue_physical_block_size(struct request_queue *q)
    1076             : {
    1077             :         return q->limits.physical_block_size;
    1078             : }
    1079             : 
    1080             : static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
    1081             : {
    1082             :         return queue_physical_block_size(bdev_get_queue(bdev));
    1083             : }
    1084             : 
    1085             : static inline unsigned int queue_io_min(struct request_queue *q)
    1086             : {
    1087             :         return q->limits.io_min;
    1088             : }
    1089             : 
    1090             : static inline int bdev_io_min(struct block_device *bdev)
    1091             : {
    1092             :         return queue_io_min(bdev_get_queue(bdev));
    1093             : }
    1094             : 
    1095             : static inline unsigned int queue_io_opt(struct request_queue *q)
    1096             : {
    1097             :         return q->limits.io_opt;
    1098             : }
    1099             : 
    1100             : static inline int bdev_io_opt(struct block_device *bdev)
    1101             : {
    1102             :         return queue_io_opt(bdev_get_queue(bdev));
    1103             : }
    1104             : 
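The queue_*() accessors above read the limits directly, while the bdev_*()
wrappers resolve the queue first so that filesystems can query a
block_device they already hold. A sketch that logs a device's reported
topology (whatever values its driver registered):

        struct request_queue *q = bdev_get_queue(bdev);

        printk(KERN_INFO "lbs=%u pbs=%u io_min=%u io_opt=%u\n",
               (unsigned int)queue_logical_block_size(q),
               queue_physical_block_size(q),
               queue_io_min(q), queue_io_opt(q));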
    1105             : static inline int queue_alignment_offset(struct request_queue *q)
    1106             : {
    1107             :         if (q->limits.misaligned)
    1108             :                 return -1;
    1109             : 
    1110             :         return q->limits.alignment_offset;
    1111             : }
    1112             : 
    1113             : static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
    1114             : {
    1115             :         unsigned int granularity = max(lim->physical_block_size, lim->io_min);
    1116             : 
    1117             :         offset &= granularity - 1;
    1118             :         return (granularity + lim->alignment_offset - offset) & (granularity - 1);
    1119             : }
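A worked example with hypothetical numbers: granularity 4096 (the larger of
physical_block_size and io_min here) and alignment_offset 0. For a byte
offset of 512:

        offset &= 4096 - 1              ->  512
        (4096 + 0 - 512) & (4096 - 1)   ->  3584

i.e. the next naturally aligned boundary lies 3584 bytes past the given
offset; a return of 0 means the offset is already aligned. Note the masking
assumes a power-of-two granularity.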
    1120             : 
    1121             : static inline int queue_sector_alignment_offset(struct request_queue *q,
    1122             :                                                 sector_t sector)
    1123             : {
    1124             :         return queue_limit_alignment_offset(&q->limits, sector << 9);
    1125             : }
    1126             : 
    1127             : static inline int bdev_alignment_offset(struct block_device *bdev)
    1128             : {
    1129             :         struct request_queue *q = bdev_get_queue(bdev);
    1130             : 
    1131             :         if (q->limits.misaligned)
    1132             :                 return -1;
    1133             : 
    1134             :         if (bdev != bdev->bd_contains)
    1135             :                 return bdev->bd_part->alignment_offset;
    1136             : 
    1137             :         return q->limits.alignment_offset;
    1138             : }
    1139             : 
    1140             : static inline int queue_discard_alignment(struct request_queue *q)
    1141             : {
    1142             :         if (q->limits.discard_misaligned)
    1143             :                 return -1;
    1144             : 
    1145             :         return q->limits.discard_alignment;
    1146             : }
    1147             : 
    1148             : static inline int queue_sector_discard_alignment(struct request_queue *q,
    1149             :                                                  sector_t sector)
    1150             : {
    1151             :         struct queue_limits *lim = &q->limits;
    1152             :         unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
    1153             : 
    1154             :         return (lim->discard_granularity + lim->discard_alignment - alignment)
    1155             :                 & (lim->discard_granularity - 1);
    1156             : }
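This is the discard analogue of queue_limit_alignment_offset() above, and it
makes the same power-of-two assumption about the granularity. With a
hypothetical discard_granularity of 4096 and discard_alignment 0, sector 1
(byte offset 512) yields (4096 + 0 - 512) & 4095 = 3584 bytes to the next
discard boundary.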
    1157             : 
    1158             : static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
    1159             : {
    1160             :         if (q->limits.discard_zeroes_data == 1)
    1161             :                 return 1;
    1162             : 
    1163             :         return 0;
    1164             : }
    1165             : 
    1166             : static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
    1167             : {
    1168             :         return queue_discard_zeroes_data(bdev_get_queue(bdev));
    1169             : }
    1170             : 
    1171             : static inline int queue_dma_alignment(struct request_queue *q)
    1172             : {
    1173             :         return q ? q->dma_alignment : 511;
    1174             : }
    1175             : 
    1176             : static inline int blk_rq_aligned(struct request_queue *q, void *addr,
    1177             :                                  unsigned int len)
    1178             : {
    1179             :         unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
    1180             :         return !((unsigned long)addr & alignment) && !(len & alignment);
    1181             : }
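queue_dma_alignment() returns a mask (the default of 511 demands 512-byte
alignment), and blk_rq_aligned() ORs in the queue's pad mask before testing
both the buffer address and the length. The block layer uses this to decide
whether a kernel buffer can be mapped directly or must be bounced through a
copy. A sketch with a hypothetical buffer:

        void *buf = kmalloc(4096, GFP_KERNEL); /* power-of-two kmallocs are
                                                  typically naturally aligned */

        if (buf && blk_rq_aligned(q, buf, 4096))
                /* safe to map directly, no intermediate copy needed */;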
    1182             : 
    1183             : /* assumes size > 256 */
    1184             : static inline unsigned int blksize_bits(unsigned int size)
    1185             : {
    1186             :         unsigned int bits = 8;
    1187             :         do {
    1188             :                 bits++;
    1189             :                 size >>= 1;
    1190             :         } while (size > 256);
    1191             :         return bits;
    1192             : }
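A worked trace, e.g. for a 4096-byte block size:

        blksize_bits(4096):  size 2048 -> 1024 -> 512 -> 256,
                             bits    9 ->   10 ->  11 ->  12

        returns 12, i.e. log2(4096); blksize_bits(512) returns 9.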
    1193             : 
    1194             : static inline unsigned int block_size(struct block_device *bdev)
    1195             : {
    1196             :         return bdev->bd_block_size;
    1197             : }
    1198             : 
    1199             : typedef struct {struct page *v;} Sector;
    1200             : 
    1201             : unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
    1202             : 
    1203             : static inline void put_dev_sector(Sector p)
    1204             : {
    1205             :         page_cache_release(p.v);
    1206             : }
    1207             : 
    1208             : struct work_struct;
    1209             : int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
    1210           1 : 
    1211             : #define MODULE_ALIAS_BLOCKDEV(major,minor) \
    1212             :         MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
    1213             : #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
    1214             :         MODULE_ALIAS("block-major-" __stringify(major) "-*")
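For example, a driver owning the first SCSI disk major (SCSI_DISK0_MAJOR,
defined as 8 in <linux/major.h>) would write:

        MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR)
        /* expands to MODULE_ALIAS("block-major-8-*") */

so module autoloading can find the driver when a node with that major is
first opened. __stringify() expands its macro argument before stringifying,
which is why the numeric value appears in the alias.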
    1215             : 
    1216             : #if defined(CONFIG_BLK_DEV_INTEGRITY)
    1217             : 
    1218             : #define INTEGRITY_FLAG_READ     2       /* verify data integrity on read */
    1219             : #define INTEGRITY_FLAG_WRITE    4       /* generate data integrity on write */
    1220             : 
    1221             : struct blk_integrity_exchg {
    1222             :         void                    *prot_buf;
    1223             :         void                    *data_buf;
    1224             :         sector_t                sector;
    1225             :         unsigned int            data_size;
    1226             :         unsigned short          sector_size;
    1227             :         const char              *disk_name;
    1228             : };
    1229             : 
    1230             : typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
    1231             : typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
    1232             : typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
    1233             : typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
    1234             : 
    1235             : struct blk_integrity {
    1236             :         integrity_gen_fn        *generate_fn;
    1237             :         integrity_vrfy_fn       *verify_fn;
    1238             :         integrity_set_tag_fn    *set_tag_fn;
    1239             :         integrity_get_tag_fn    *get_tag_fn;
    1240             : 
    1241             :         unsigned short          flags;
    1242             :         unsigned short          tuple_size;
    1243             :         unsigned short          sector_size;
    1244             :         unsigned short          tag_size;
    1245             : 
    1246             :         const char              *name;
    1247             : 
    1248             :         struct kobject          kobj;
    1249             : };
    1250             : 
    1251             : extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
    1252             : extern void blk_integrity_unregister(struct gendisk *);
    1253             : extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
    1254             : extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
    1255             : extern int blk_rq_count_integrity_sg(struct request *);
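A registration sketch, assuming hypothetical mydrv callbacks that generate
and verify 8 bytes of protection information per sector (the tuple size used
by T10 DIF):

        static struct blk_integrity mydrv_dif = {
                .name        = "MYDRV-DIF",     /* hypothetical profile name */
                .generate_fn = mydrv_generate,  /* fill prot_buf on writes */
                .verify_fn   = mydrv_verify,    /* check prot_buf on reads */
                .tuple_size  = 8,               /* bytes of PI per sector */
                .flags       = INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE,
        };

        /* at probe time, once the gendisk exists: */
        if (blk_integrity_register(disk, &mydrv_dif))
                /* integrity unavailable; carry on without it */;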
    1256             : 
    1257             : static inline
    1258             : struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
    1259             : {
    1260             :         return bdev->bd_disk->integrity;
    1261             : }
    1262             : 
    1263             : static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
    1264             : {
    1265             :         return disk->integrity;
    1266             : }
    1267             : 
    1268             : static inline int blk_integrity_rq(struct request *rq)
    1269             : {
    1270             :         if (rq->bio == NULL)
    1271             :                 return 0;
    1272             : 
    1273             :         return bio_integrity(rq->bio);
    1274             : }
    1275             : 
    1276             : #else /* CONFIG_BLK_DEV_INTEGRITY */
    1277             : 
    1278             : #define blk_integrity_rq(rq)                    (0)
    1279             : #define blk_rq_count_integrity_sg(a)            (0)
    1280             : #define blk_rq_map_integrity_sg(a, b)           (0)
    1281             : #define bdev_get_integrity(a)                   (0)
    1282             : #define blk_get_integrity(a)                    (0)
    1283             : #define blk_integrity_compare(a, b)             (0)
    1284             : #define blk_integrity_register(a, b)            (0)
     1285             : #define blk_integrity_unregister(a)             do { } while (0)
    1286             : 
    1287             : #endif /* CONFIG_BLK_DEV_INTEGRITY */
    1288             : 
    1289             : struct block_device_operations {
    1290             :         int (*open) (struct block_device *, fmode_t);
    1291             :         int (*release) (struct gendisk *, fmode_t);
    1292             :         int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
    1293             :         int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
    1294             :         int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
    1295             :         int (*direct_access) (struct block_device *, sector_t,
    1296             :                                                 void **, unsigned long *);
    1297             :         int (*media_changed) (struct gendisk *);
    1298             :         unsigned long long (*set_capacity) (struct gendisk *,
    1299             :                                                 unsigned long long);
    1300             :         int (*revalidate_disk) (struct gendisk *);
    1301             :         int (*getgeo)(struct block_device *, struct hd_geometry *);
    1302             :         struct module *owner;
    1303             : };
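A minimal sketch of a driver's operations table, with the callbacks
hypothetical; .owner pins the module while the device is open:

        static const struct block_device_operations mydrv_fops = {
                .owner   = THIS_MODULE,
                .open    = mydrv_open,
                .release = mydrv_release,
                .getgeo  = mydrv_getgeo,  /* CHS geometry for HDIO_GETGEO */
        };

        /* wired up before add_disk(): */
        disk->fops = &mydrv_fops;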
    1304             : 
    1305             : extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
    1306           1 :                                  unsigned long);
    1307             : #else /* CONFIG_BLOCK */
    1308             : /*
    1309             :  * stubs for when the block layer is configured out
    1310             :  */
    1311             : #define buffer_heads_over_limit 0
    1312             : 
    1313             : static inline long nr_blockdev_pages(void)
    1314             : {
    1315             :         return 0;
    1316             : }
    1317             : 
    1318             : #endif /* CONFIG_BLOCK */
    1319             : 
    1320             : #endif

Generated by: LCOV version 1.10