/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/sched/rt.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ	65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

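/*
 * Per-card worker thread.  It fetches requests from the block layer and
 * hands them to mq->issue_fn, keeping a current/previous request pair so
 * the next transfer can be prepared while the previous one is still in
 * flight.
 */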
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct sched_param scheduler_params = {0};

	scheduler_params.sched_priority = 1;

	sched_setscheduler(current, SCHED_FIFO, &scheduler_params);

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req = NULL;
		unsigned int cmd_flags = 0;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->mqrq_cur->req = req;
		spin_unlock_irq(q->queue_lock);

		if (req || mq->mqrq_prev->req) {
			set_current_state(TASK_RUNNING);
			cmd_flags = req ? req->cmd_flags : 0;
			mq->issue_fn(mq, req);
			cond_resched();
			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
				continue; /* fetch again */
			}

			/*
			 * The current request becomes the previous one,
			 * and vice versa.  A special request has already
			 * been completed, so do not carry it over as the
			 * previous request.
			 */
			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
				mq->mqrq_cur->req = NULL;

			mq->mqrq_prev->brq.mrq.data = NULL;
			mq->mqrq_prev->req = NULL;
			swap(mq->mqrq_prev, mq->mqrq_cur);
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
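			/*
			 * Release thread_sem so mmc_queue_suspend() can
			 * proceed while we sleep waiting for new requests.
			 */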
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	unsigned long flags;
	struct mmc_context_info *cntx;
	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->cmd_flags |= REQ_QUIET;
			__blk_end_request_all(req, -EIO);
		}
		return;
	}

	cntx = &mq->card->host->context_info;
	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
		/*
		 * A new request arrived while the MMC thread may still be
		 * blocked waiting for the previous request to complete,
		 * with no current request fetched.
		 */
		spin_lock_irqsave(&cntx->lock, flags);
		if (cntx->is_waiting_last_req) {
			cntx->is_new_req = true;
			wake_up_interruptible(&cntx->wait);
		}
		spin_unlock_irqrestore(&cntx->lock, flags);
	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
		wake_up_process(mq->thread);
}

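/*
 * Allocate and initialise a scatterlist of sg_len entries.  On failure
 * *err is set to -ENOMEM and NULL is returned.
 */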
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
	} else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}

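/*
 * Derive the queue's discard limits from the card's erase capabilities.
 */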
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	if (card->erased_byte == 0 && !mmc_can_discard(card))
		q->limits.discard_zeroes_data = 1;
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach to this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

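	/*
	 * Use the host's DMA mask to work out the highest address the
	 * block layer may hand us before bouncing is required.
	 */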
	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request_fn, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->mqrq_cur = mqrq_cur;
	mq->mqrq_prev = mqrq_prev;
	mq->queue->queuedata = mq;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

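	/*
	 * Hosts that can only handle a single segment per request get a
	 * contiguous bounce buffer instead, so larger transfers can still
	 * be assembled from scattered pages.
	 */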
#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mqrq_cur->bounce_buf) {
				pr_warn("%s: unable to allocate bounce cur buffer\n",
					mmc_card_name(card));
			} else {
				mqrq_prev->bounce_buf =
						kmalloc(bouncesz, GFP_KERNEL);
				if (!mqrq_prev->bounce_buf) {
					pr_warn("%s: unable to allocate bounce prev buffer\n",
						mmc_card_name(card));
					kfree(mqrq_cur->bounce_buf);
					mqrq_cur->bounce_buf = NULL;
				}
			}
		}

		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_cur->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
			if (ret)
				goto cleanup_queue;

			mqrq_prev->bounce_sg =
				mmc_alloc_sg(bouncesz / 512, &ret);
			if (ret)
				goto cleanup_queue;
		}
	}
#endif

	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_segments(mq->queue, host->max_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;

		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
		if (ret)
			goto cleanup_queue;
	}

	sema_init(&mq->thread_sem, 1);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	blk_cleanup_queue(mq->queue);
	return ret;
}

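/**
 * mmc_cleanup_queue - tear down an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Stop the worker thread, fail any remaining requests and free the
 * buffers attached to the queue.
 */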
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;

	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;

	kfree(mqrq_cur->bounce_buf);
	mqrq_cur->bounce_buf = NULL;

	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;

	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;

	kfree(mqrq_prev->bounce_buf);
	mqrq_prev->bounce_buf = NULL;

	mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

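/*
 * Allocate the packed-command bookkeeping for both request slots; used
 * when the card supports packed read/write commands.
 */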
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
	int ret = 0;

	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_cur->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
			mmc_card_name(card));
		ret = -ENOMEM;
		goto out;
	}

	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
	if (!mqrq_prev->packed) {
		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
			mmc_card_name(card));
		kfree(mqrq_cur->packed);
		mqrq_cur->packed = NULL;
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&mqrq_cur->packed->list);
	INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
	return ret;
}

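/*
 * Free the packed-command bookkeeping allocated by mmc_packed_init().
 */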
void mmc_packed_clean(struct mmc_queue *mq)
{
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

	kfree(mqrq_cur->packed);
	mqrq_cur->packed = NULL;
	kfree(mqrq_prev->packed);
	mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
		mq->flags |= MMC_QUEUE_SUSPENDED;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->flags & MMC_QUEUE_SUSPENDED) {
		mq->flags &= ~MMC_QUEUE_SUSPENDED;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

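/*
 * Map a packed command onto a scatterlist.  For packed writes the packed
 * command header is mapped first, followed by the data of every request
 * on the packed list.
 */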
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
					    struct mmc_packed *packed,
					    struct scatterlist *sg,
					    enum mmc_packed_type cmd_type)
{
	struct scatterlist *__sg = sg;
	unsigned int sg_len = 0;
	struct request *req;

	if (mmc_packed_wr(cmd_type)) {
		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
		unsigned int len, remain, offset = 0;
		u8 *buf = (u8 *)packed->cmd_hdr;

		/*
		 * Map the packed command header, splitting it into
		 * segments no larger than the queue allows.
		 */
		remain = hdr_sz;
		do {
			len = min(remain, max_seg_sz);
			sg_set_buf(__sg, buf + offset, len);
			offset += len;
			remain -= len;
			sg_unmark_end(__sg++);
			sg_len++;
		} while (remain);
	}

	/*
	 * Map the data of each packed request, clearing intermediate end
	 * markers so the entries form one contiguous scatterlist.
	 */
	list_for_each_entry(req, &packed->list, queuelist) {
		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
		__sg = sg + (sg_len - 1);
		sg_unmark_end(__sg++);
	}
	sg_mark_end(sg + (sg_len - 1));
	return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	unsigned int sg_len;
	size_t buflen;
	struct scatterlist *sg;
	enum mmc_packed_type cmd_type;
	int i;

	cmd_type = mqrq->cmd_type;

	if (!mqrq->bounce_buf) {
		if (mmc_packed_cmd(cmd_type))
			return mmc_queue_packed_map_sg(mq, mqrq->packed,
						       mqrq->sg, cmd_type);
		else
			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
	}

	BUG_ON(!mqrq->bounce_sg);

	if (mmc_packed_cmd(cmd_type))
		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
						 mqrq->bounce_sg, cmd_type);
	else
		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

	mqrq->bounce_sg_len = sg_len;

	buflen = 0;
	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
		buflen += sg->length;

	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

	return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != WRITE)
		return;

	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
	if (!mqrq->bounce_buf)
		return;

	if (rq_data_dir(mqrq->req) != READ)
		return;

	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
}