LCOV - code coverage report

Current view: top level - drivers/mmc/card - queue.c
Test:         coverage.info
Date:         2017-01-25

                 Hit    Total    Coverage
Lines:            75      167      44.9 %
Functions:         3       10      30.0 %

          Line data    Source code
       1             : /*
       2             :  *  linux/drivers/mmc/card/queue.c
       3             :  *
       4             :  *  Copyright (C) 2003 Russell King, All Rights Reserved.
       5             :  *  Copyright 2006-2007 Pierre Ossman
       6             :  *
       7             :  * This program is free software; you can redistribute it and/or modify
       8             :  * it under the terms of the GNU General Public License version 2 as
       9             :  * published by the Free Software Foundation.
      10             :  *
      11             :  */
      12             : #include <linux/module.h>
      13             : #include <linux/blkdev.h>
      14             : #include <linux/freezer.h>
      15             : #include <linux/kthread.h>
      16             : #include <linux/scatterlist.h>
      17             : 
      18             : #include <linux/mmc/card.h>
      19             : #include <linux/mmc/host.h>
      20             : #include "queue.h"
      21             : 
      22             : #define MMC_QUEUE_BOUNCESZ      65536
      23             : 
      24             : #define MMC_QUEUE_SUSPENDED     (1 << 0)
      25             : 
      26             : /*
       27             :  * Prepare an MMC request. This just filters out odd stuff.
      28             :  */
      29             : static int mmc_prep_request(struct request_queue *q, struct request *req)
      30             : {
      31             :         /*
      32             :          * We only like normal block requests.
      33             :          */
      34           0 :         if (!blk_fs_request(req)) {
      35           0 :                 blk_dump_rq_flags(req, "MMC bad request");
      36           0 :                 return BLKPREP_KILL;
      37             :         }
      38             : 
      39           0 :         req->cmd_flags |= REQ_DONTPREP;
      40             : 
      41           0 :         return BLKPREP_OK;
      42             : }
      43             : 
      44             : static int mmc_queue_thread(void *d)
      45             : {
      46           0 :         struct mmc_queue *mq = d;
      47           0 :         struct request_queue *q = mq->queue;
      48           0 : 
      49           0 :         current->flags |= PF_MEMALLOC;
      50           0 : 
      51           0 :         down(&mq->thread_sem);
      52           0 :         do {
      53           0 :                 struct request *req = NULL;
      54           0 : 
      55           0 :                 spin_lock_irq(q->queue_lock);
      56           0 :                 set_current_state(TASK_INTERRUPTIBLE);
      57           0 :                 if (!blk_queue_plugged(q))
      58           0 :                         req = blk_fetch_request(q);
      59           0 :                 mq->req = req;
      60           0 :                 spin_unlock_irq(q->queue_lock);
      61           0 : 
      62           0 :                 if (!req) {
      63           0 :                         if (kthread_should_stop()) {
      64           0 :                                 set_current_state(TASK_RUNNING);
      65           0 :                                 break;
      66           0 :                         }
      67           0 :                         up(&mq->thread_sem);
      68           0 :                         schedule();
      69           0 :                         down(&mq->thread_sem);
      70           0 :                         continue;
      71             :                 }
      72           0 :                 set_current_state(TASK_RUNNING);
      73           0 : 
      74           0 :                 mq->issue_fn(mq, req);
      75           0 :         } while (1);
      76           0 :         up(&mq->thread_sem);
      77             : 
      78           0 :         return 0;
      79             : }
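
A hedged sketch of the issue_fn contract invoked above (in the MMC block driver the real handler is mmc_blk_issue_rq in block.c; the hypothetical my_issue_fn below only shows the shape): it runs in mmc_queue_thread context with mq->thread_sem held, performs the transfer, and completes the request under the queue lock.

    static int my_issue_fn(struct mmc_queue *mq, struct request *req)
    {
            /* ... claim the host, build the mmc_request, wait for it ... */

            spin_lock_irq(mq->queue->queue_lock);
            __blk_end_request_all(req, 0);          /* 0 == success */
            spin_unlock_irq(mq->queue->queue_lock);

            return 1;                               /* request handled */
    }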
      80             : 
      81             : /*
      82             :  * Generic MMC request handler.  This is called for any queue on a
      83             :  * particular host.  When the host is not busy, we look for a request
      84             :  * on any queue on this host, and attempt to issue it.  This may
      85             :  * not be the queue we were asked to process.
      86             :  */
      87             : static void mmc_request(struct request_queue *q)
      88             : {
      89           0 :         struct mmc_queue *mq = q->queuedata;
      90           0 :         struct request *req;
      91             : 
      92           0 :         if (!mq) {
      93           0 :                 while ((req = blk_fetch_request(q)) != NULL) {
      94           0 :                         req->cmd_flags |= REQ_QUIET;
      95           0 :                         __blk_end_request_all(req, -EIO);
      96             :                 }
      97           0 :                 return;
      98             :         }
      99             : 
     100           0 :         if (!mq->req)
     101           0 :                 wake_up_process(mq->thread);
     102           0 : }
     103             : 
     104             : /**
     105             :  * mmc_init_queue - initialise a queue structure.
     106             :  * @mq: mmc queue
     107             :  * @card: mmc card to attach this queue
     108             :  * @lock: queue lock
     109             :  *
      110             :  * Initialise an MMC card request queue.
     111             :  */
     112             : int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
     113             : {
     114           2 :         struct mmc_host *host = card->host;
     115           2 :         u64 limit = BLK_BOUNCE_HIGH;
     116           1 :         int ret;
     117           1 : 
     118           6 :         if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
     119           2 :                 limit = *mmc_dev(host)->dma_mask;
     120           1 : 
     121           2 :         mq->card = card;
     122           2 :         mq->queue = blk_init_queue(mmc_request, lock);
     123           4 :         if (!mq->queue)
     124           2 :                 return -ENOMEM;
     125             : 
     126           1 :         mq->queue->queuedata = mq;
     127           1 :         mq->req = NULL;
     128             : 
     129           1 :         blk_queue_prep_rq(mq->queue, mmc_prep_request);
     130           1 :         blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
     131           2 :         queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
     132             : 
     133             : #ifdef CONFIG_MMC_BLOCK_BOUNCE
     134             :         if (host->max_hw_segs == 1) {
     135             :                 unsigned int bouncesz;
     136             : 
     137             :                 bouncesz = MMC_QUEUE_BOUNCESZ;
     138             : 
     139             :                 if (bouncesz > host->max_req_size)
     140             :                         bouncesz = host->max_req_size;
     141             :                 if (bouncesz > host->max_seg_size)
     142             :                         bouncesz = host->max_seg_size;
     143             :                 if (bouncesz > (host->max_blk_count * 512))
     144             :                         bouncesz = host->max_blk_count * 512;
     145             : 
     146             :                 if (bouncesz > 512) {
     147             :                         mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
     148             :                         if (!mq->bounce_buf) {
     149             :                                 printk(KERN_WARNING "%s: unable to "
     150             :                                         "allocate bounce buffer\n",
     151             :                                         mmc_card_name(card));
     152             :                         }
     153             :                 }
     154             : 
     155             :                 if (mq->bounce_buf) {
     156             :                         blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
     157             :                         blk_queue_max_sectors(mq->queue, bouncesz / 512);
     158             :                         blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
     159             :                         blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
     160             :                         blk_queue_max_segment_size(mq->queue, bouncesz);
     161             : 
     162             :                         mq->sg = kmalloc(sizeof(struct scatterlist),
     163             :                                 GFP_KERNEL);
     164             :                         if (!mq->sg) {
     165             :                                 ret = -ENOMEM;
     166             :                                 goto cleanup_queue;
     167             :                         }
     168             :                         sg_init_table(mq->sg, 1);
     169             : 
     170             :                         mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
     171             :                                 bouncesz / 512, GFP_KERNEL);
     172             :                         if (!mq->bounce_sg) {
     173             :                                 ret = -ENOMEM;
     174             :                                 goto cleanup_queue;
     175             :                         }
     176             :                         sg_init_table(mq->bounce_sg, bouncesz / 512);
     177             :                 }
     178             :         }
     179             : #endif
     180             : 
     181           3 :         if (!mq->bounce_buf) {
     182           1 :                 blk_queue_bounce_limit(mq->queue, limit);
     183             :                 blk_queue_max_sectors(mq->queue,
     184           8 :                         min(host->max_blk_count, host->max_req_size / 512));
     185           2 :                 blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
     186           2 :                 blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
     187           1 :                 blk_queue_max_segment_size(mq->queue, host->max_seg_size);
     188             : 
     189           4 :                 mq->sg = kmalloc(sizeof(struct scatterlist) *
     190             :                         host->max_phys_segs, GFP_KERNEL);
     191           3 :                 if (!mq->sg) {
     192           1 :                         ret = -ENOMEM;
     193           1 :                         goto cleanup_queue;
     194             :                 }
     195           2 :                 sg_init_table(mq->sg, host->max_phys_segs);
     196             :         }
     197             : 
     198           4 :         init_MUTEX(&mq->thread_sem);
     199             : 
     200           8 :         mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
     201           5 :         if (IS_ERR(mq->thread)) {
     202           4 :                 ret = PTR_ERR(mq->thread);
     203           1 :                 goto free_bounce_sg;
     204             :         }
     205             : 
     206           1 :         return 0;
     207           1 :  free_bounce_sg:
     208           3 :         if (mq->bounce_sg)
     209           2 :                 kfree(mq->bounce_sg);
     210           1 :         mq->bounce_sg = NULL;
     211           1 :  cleanup_queue:
     212           6 :         if (mq->sg)
     213           4 :                 kfree(mq->sg);
     214           2 :         mq->sg = NULL;
     215           6 :         if (mq->bounce_buf)
     216           4 :                 kfree(mq->bounce_buf);
     217           2 :         mq->bounce_buf = NULL;
     218           2 :         blk_cleanup_queue(mq->queue);
     219           2 :         return ret;
     220             : }
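
A hedged caller sketch, modelled on the MMC block driver (struct my_blk_data and my_probe are hypothetical names): the caller owns the spinlock passed as @lock, initialises the queue, and installs its issue_fn before any I/O can reach the worker thread.

    struct my_blk_data {
            spinlock_t       lock;
            struct mmc_queue queue;
    };

    static int my_probe(struct mmc_card *card, struct my_blk_data *md)
    {
            int ret;

            spin_lock_init(&md->lock);
            ret = mmc_init_queue(&md->queue, card, &md->lock);
            if (ret)
                    return ret;

            md->queue.issue_fn = my_issue_fn;       /* sketch above */
            return 0;
    }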
     221             : 
     222             : void mmc_cleanup_queue(struct mmc_queue *mq)
     223             : {
     224           4 :         struct request_queue *q = mq->queue;
     225           2 :         unsigned long flags;
     226           2 : 
     227             :         /* Make sure the queue isn't suspended, as that will deadlock */
     228           6 :         mmc_queue_resume(mq);
     229             : 
     230             :         /* Then terminate our worker thread */
     231           2 :         kthread_stop(mq->thread);
     232             : 
     233             :         /* Empty the queue */
     234           6 :         spin_lock_irqsave(q->queue_lock, flags);
     235           2 :         q->queuedata = NULL;
     236           2 :         blk_start_queue(q);
     237           4 :         spin_unlock_irqrestore(q->queue_lock, flags);
     238             : 
     239           6 :         if (mq->bounce_sg)
     240           4 :                 kfree(mq->bounce_sg);
     241           2 :         mq->bounce_sg = NULL;
     242             : 
     243           4 :         kfree(mq->sg);
     244           2 :         mq->sg = NULL;
     245             : 
     246           6 :         if (mq->bounce_buf)
     247           4 :                 kfree(mq->bounce_buf);
     248           2 :         mq->bounce_buf = NULL;
     249             : 
     250           2 :         mq->card = NULL;
     251           2 : }
     252             : EXPORT_SYMBOL(mmc_cleanup_queue);
     253             : 
     254             : /**
      255             :  * mmc_queue_suspend - suspend an MMC request queue
     256             :  * @mq: MMC queue to suspend
     257             :  *
     258             :  * Stop the block request queue, and wait for our thread to
     259             :  * complete any outstanding requests.  This ensures that we
     260             :  * won't suspend while a request is being processed.
     261             :  */
     262             : void mmc_queue_suspend(struct mmc_queue *mq)
     263             : {
     264           0 :         struct request_queue *q = mq->queue;
     265           0 :         unsigned long flags;
     266           0 : 
     267           0 :         if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
     268           0 :                 mq->flags |= MMC_QUEUE_SUSPENDED;
     269             : 
     270           0 :                 spin_lock_irqsave(q->queue_lock, flags);
     271           0 :                 blk_stop_queue(q);
     272           0 :                 spin_unlock_irqrestore(q->queue_lock, flags);
     273             : 
     274           0 :                 down(&mq->thread_sem);
     275             :         }
     276           0 : }
     277             : 
     278             : /**
     279             :  * mmc_queue_resume - resume a previously suspended MMC request queue
     280             :  * @mq: MMC queue to resume
     281             :  */
     282             : void mmc_queue_resume(struct mmc_queue *mq)
     283             : {
     284           4 :         struct request_queue *q = mq->queue;
     285           2 :         unsigned long flags;
     286           2 : 
     287           6 :         if (mq->flags & MMC_QUEUE_SUSPENDED) {
     288           2 :                 mq->flags &= ~MMC_QUEUE_SUSPENDED;
     289             : 
     290           2 :                 up(&mq->thread_sem);
     291             : 
     292           6 :                 spin_lock_irqsave(q->queue_lock, flags);
     293           2 :                 blk_start_queue(q);
     294           4 :                 spin_unlock_irqrestore(q->queue_lock, flags);
     295             :         }
     296           4 : }
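
A hedged usage sketch of the suspend/resume pair (my_suspend and my_resume are hypothetical wrappers; the real callers are the block driver's PM hooks): suspend stops the block queue and parks the worker via thread_sem so no request is mid-flight, and resume undoes both steps in reverse order.

    static int my_suspend(struct my_blk_data *md)
    {
            mmc_queue_suspend(&md->queue);  /* stop queue, park worker */
            return 0;
    }

    static int my_resume(struct my_blk_data *md)
    {
            mmc_queue_resume(&md->queue);   /* restart queue and worker */
            return 0;
    }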
     297             : 
     298             : /*
      299             :  * Prepare the sg list(s) to be handed off to the host driver
     300             :  */
     301             : unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
     302             : {
     303           0 :         unsigned int sg_len;
     304           0 :         size_t buflen;
     305           0 :         struct scatterlist *sg;
     306           0 :         int i;
     307           0 : 
     308           0 :         if (!mq->bounce_buf)
     309           0 :                 return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
     310             : 
     311           0 :         BUG_ON(!mq->bounce_sg);
     312             : 
     313           0 :         sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
     314             : 
     315           0 :         mq->bounce_sg_len = sg_len;
     316             : 
     317           0 :         buflen = 0;
     318           0 :         for_each_sg(mq->bounce_sg, sg, sg_len, i)
     319           0 :                 buflen += sg->length;
     320           0 : 
     321           0 :         sg_init_one(mq->sg, mq->bounce_buf, buflen);
     322             : 
     323           0 :         return 1;
     324             : }
     325             : 
     326             : /*
     327             :  * If writing, bounce the data to the buffer before the request
     328             :  * is sent to the host driver
     329             :  */
     330             : void mmc_queue_bounce_pre(struct mmc_queue *mq)
     331             : {
     332           0 :         unsigned long flags;
     333           0 : 
     334           0 :         if (!mq->bounce_buf)
     335           0 :                 return;
     336             : 
     337           0 :         if (rq_data_dir(mq->req) != WRITE)
     338           0 :                 return;
     339             : 
     340           0 :         local_irq_save(flags);
     341           0 :         sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
     342             :                 mq->bounce_buf, mq->sg[0].length);
     343           0 :         local_irq_restore(flags);
     344           0 : }
     345             : 
     346             : /*
     347             :  * If reading, bounce the data from the buffer after the request
     348             :  * has been handled by the host driver
     349             :  */
     350             : void mmc_queue_bounce_post(struct mmc_queue *mq)
     351             : {
     352           0 :         unsigned long flags;
     353           0 : 
     354           0 :         if (!mq->bounce_buf)
     355           0 :                 return;
     356             : 
     357           0 :         if (rq_data_dir(mq->req) != READ)
     358           0 :                 return;
     359             : 
     360           0 :         local_irq_save(flags);
     361           0 :         sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
     362             :                 mq->bounce_buf, mq->sg[0].length);
     363           0 :         local_irq_restore(flags);
     364           0 : }
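
A hedged sketch of how the three helpers above are ordered inside an issue_fn (my_do_transfer is hypothetical; error handling and the actual host I/O are elided): map the request, bounce write data in, run the transfer, then bounce read data back out.

    static void my_do_transfer(struct mmc_queue *mq)
    {
            unsigned int sg_len;

            sg_len = mmc_queue_map_sg(mq);  /* returns 1 when bouncing */
            mmc_queue_bounce_pre(mq);       /* copy write payload in */

            /* ... hand mq->sg and sg_len to the host controller ... */

            mmc_queue_bounce_post(mq);      /* copy read payload out */
    }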
     365             : 

Generated by: LCOV version 1.10