/*
 * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/netdevice.h>

#include "qib.h"
#include "qib_common.h"

/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
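
/*
 * The ring depth is a load-time tunable and is read-only thereafter
 * (S_IRUGO).  For example (illustrative value, module name as built
 * from this directory): "modprobe ib_qib sdma_descq_cnt=1024".
 */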

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC_LAST          (1ULL << 11)
#define SDMA_DESC_FIRST         (1ULL << 12)
#define SDMA_DESC_DMA_HEAD      (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR          (1ULL << 15)
#define SDMA_DESC_COUNT_LSB     16
#define SDMA_DESC_GEN_LSB       30
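
/*
 * Sketch of the first descriptor qword as assembled by make_sdma_desc()
 * below: bits [10:0] hold the send-buffer offset in dwords, bits [15:11]
 * the flag bits above, [26:16] the dword count, [31:30] the generation,
 * and [63:32] the low 32 bits of the DMA address; the high address bits
 * go in the second qword.
 */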

char *qib_sdma_state_names[] = {
        [qib_sdma_state_s00_hw_down]          = "s00_HwDown",
        [qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
        [qib_sdma_state_s20_idle]             = "s20_Idle",
        [qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
        [qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
        [qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
        [qib_sdma_state_s99_running]          = "s99_Running",
};

char *qib_sdma_event_names[] = {
        [qib_sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
        [qib_sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
        [qib_sdma_event_e20_hw_started]   = "e20_HwStarted",
        [qib_sdma_event_e30_go_running]   = "e30_GoRunning",
        [qib_sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
        [qib_sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
        [qib_sdma_event_e60_hw_halted]    = "e60_HwHalted",
        [qib_sdma_event_e70_go_idle]      = "e70_GoIdle",
        [qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
        [qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
        [qib_sdma_event_e90_timer_tick]   = "e90_TimerTick",
};

/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);
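/*
 * Reference counting for the state machine: sdma_get() takes a kref,
 * sdma_put() drops one (completing ->comp on the final drop via
 * sdma_complete()), and sdma_finalput() drops the caller's reference
 * and then blocks until the state machine has fully stopped.
 */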
static void sdma_get(struct qib_sdma_state *ss)
{
        kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
        struct qib_sdma_state *ss =
                container_of(kref, struct qib_sdma_state, kref);

        complete(&ss->comp);
}

static void sdma_put(struct qib_sdma_state *ss)
{
        kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct qib_sdma_state *ss)
{
        sdma_put(ss);
        wait_for_completion(&ss->comp);
}

/*
 * Complete all the sdma requests on the active list, in the correct
 * order, and with appropriate processing.   Called when cleaning up
 * after sdma shutdown, and when new sdma requests are submitted for
 * a link that is down.   This matches what is done for requests
 * that complete normally; it's just the full list.
 *
 * Must be called with sdma_lock held
 */
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
        struct qib_sdma_txreq *txp, *txp_next;

        list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
                list_del_init(&txp->list);
                if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
                        unsigned idx;

                        idx = txp->start_idx;
                        while (idx != txp->next_descq_idx) {
                                unmap_desc(ppd, idx);
                                if (++idx == ppd->sdma_descq_cnt)
                                        idx = 0;
                        }
                }
                if (txp->callback)
                        (*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
        }
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
        struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        /*
         * At this point, the following should always be true:
         * - We are halted, so no more descriptors are getting retired.
         * - We are not running, so no one is submitting new work.
         * - Only we can send the e40_sw_cleaned, so we can't start
         *   running again until we say so.  So, the active list and
         *   descq are ours to play with.
         */

        /* Process all retired requests. */
        qib_sdma_make_progress(ppd);

        clear_sdma_activelist(ppd);

        /*
         * Resync count of added and removed.  It is VERY important that
         * sdma_descq_removed NEVER decrement - user_sdma depends on it.
         */
        ppd->sdma_descq_removed = ppd->sdma_descq_added;

        /*
         * Reset our notion of head and tail.
         * Note that the HW registers will be reset when switching states
         * due to calling __qib_sdma_process_event() below.
         */
        ppd->sdma_descq_tail = 0;
        ppd->sdma_descq_head = 0;
        ppd->sdma_head_dma[0] = 0;
        ppd->sdma_generation = 0;

        __qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

/*
 * This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
 * as a result of send buffer errors or send DMA descriptor errors.
 * We want to disarm the buffers in these cases.
 */
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
        struct qib_sdma_state *ss = &ppd->sdma_state;
        unsigned bufno;

        for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
                ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));

        ppd->dd->f_sdma_hw_start_up(ppd);
}

static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
        struct qib_sdma_state *ss = &ppd->sdma_state;

        /* Releasing this reference means the state machine has stopped. */
        sdma_put(ss);
}

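/* Defer the software clean up to the tasklet (softirq context). */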
static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
        tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}

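/*
 * Record the transition to next_state and program the chip's send
 * control with the enable/interrupt/halt/drain ops declared for that
 * state in the per-device set_state_action table.
 */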
static void sdma_set_state(struct qib_pportdata *ppd,
        enum qib_sdma_states next_state)
{
        struct qib_sdma_state *ss = &ppd->sdma_state;
        struct sdma_set_state_action *action = ss->set_state_action;
        unsigned op = 0;

        /* debugging bookkeeping */
        ss->previous_state = ss->current_state;
        ss->previous_op = ss->current_op;

        ss->current_state = next_state;

        if (action[next_state].op_enable)
                op |= QIB_SDMA_SENDCTRL_OP_ENABLE;

        if (action[next_state].op_intenable)
                op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;

        if (action[next_state].op_halt)
                op |= QIB_SDMA_SENDCTRL_OP_HALT;

        if (action[next_state].op_drain)
                op |= QIB_SDMA_SENDCTRL_OP_DRAIN;

        if (action[next_state].go_s99_running_tofalse)
                ss->go_s99_running = 0;

        if (action[next_state].go_s99_running_totrue)
                ss->go_s99_running = 1;

        ss->current_op = op;

        ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}

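/*
 * Reverse the dma_map_single() done when the descriptor at "head" was
 * built: reassemble the bus address from the two descriptor qwords and
 * recover the byte length from the dword count field.
 */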
static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
        __le64 *descqp = &ppd->sdma_descq[head].qw[0];
        u64 desc[2];
        dma_addr_t addr;
        size_t len;

        desc[0] = le64_to_cpu(descqp[0]);
        desc[1] = le64_to_cpu(descqp[1]);

        addr = (desc[1] << 32) | (desc[0] >> 32);
        len = (desc[0] >> 14) & (0x7ffULL << 2);
        dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}

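/*
 * Allocate the coherent descriptor ring and the page used for DMA of
 * the hardware head pointer back to host memory; on failure everything
 * is unwound and -ENOMEM is returned.
 */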
static int alloc_sdma(struct qib_pportdata *ppd)
{
        ppd->sdma_descq_cnt = sdma_descq_cnt;
        if (!ppd->sdma_descq_cnt)
                ppd->sdma_descq_cnt = 256;

        /* Allocate memory for SendDMA descriptor FIFO */
        ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
                ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
                GFP_KERNEL);

        if (!ppd->sdma_descq) {
                qib_dev_err(ppd->dd,
                        "failed to allocate SendDMA descriptor FIFO memory\n");
                goto bail;
        }

        /* Allocate memory for DMA of head register to memory */
        ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
                PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
        if (!ppd->sdma_head_dma) {
                qib_dev_err(ppd->dd,
                        "failed to allocate SendDMA head memory\n");
                goto cleanup_descq;
        }
        ppd->sdma_head_dma[0] = 0;
        return 0;

cleanup_descq:
        dma_free_coherent(&ppd->dd->pcidev->dev,
                ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
                ppd->sdma_descq_phys);
        ppd->sdma_descq = NULL;
        ppd->sdma_descq_phys = 0;
bail:
        ppd->sdma_descq_cnt = 0;
        return -ENOMEM;
}

static void free_sdma(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;

        if (ppd->sdma_head_dma) {
                dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
                                  (void *)ppd->sdma_head_dma,
                                  ppd->sdma_head_phys);
                ppd->sdma_head_dma = NULL;
                ppd->sdma_head_phys = 0;
        }

        if (ppd->sdma_descq) {
                dma_free_coherent(&dd->pcidev->dev,
                                  ppd->sdma_descq_cnt * sizeof(u64[2]),
                                  ppd->sdma_descq, ppd->sdma_descq_phys);
                ppd->sdma_descq = NULL;
                ppd->sdma_descq_phys = 0;
        }
}

static inline void make_sdma_desc(struct qib_pportdata *ppd,
                                  u64 *sdmadesc, u64 addr, u64 dwlen,
                                  u64 dwoffset)
{
        WARN_ON(addr & 3);
        /* SDmaPhyAddr[47:32] */
        sdmadesc[1] = addr >> 32;
        /* SDmaPhyAddr[31:0] */
        sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
        /* SDmaGeneration[1:0] */
        sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
                SDMA_DESC_GEN_LSB;
        /* SDmaDwordCount[10:0] */
        sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
        /* SDmaBufOffset[12:2] */
        sdmadesc[0] |= dwoffset & 0x7ffULL;
}

/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
        struct list_head *lp = NULL;
        struct qib_sdma_txreq *txp = NULL;
        struct qib_devdata *dd = ppd->dd;
        int progress = 0;
        u16 hwhead;
        u16 idx = 0;

        hwhead = dd->f_sdma_gethead(ppd);

        /* The reason for some of the complexity of this code is that
         * not all descriptors have corresponding txps.  So, we have to
         * be able to skip over descs until we wander into the range of
         * the next txp on the list.
         */

        if (!list_empty(&ppd->sdma_activelist)) {
                lp = ppd->sdma_activelist.next;
                txp = list_entry(lp, struct qib_sdma_txreq, list);
                idx = txp->start_idx;
        }

        while (ppd->sdma_descq_head != hwhead) {
                /* if desc is part of this txp, unmap if needed */
                if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
                    (idx == ppd->sdma_descq_head)) {
                        unmap_desc(ppd, ppd->sdma_descq_head);
                        if (++idx == ppd->sdma_descq_cnt)
                                idx = 0;
                }

                /* increment dequeued desc count */
                ppd->sdma_descq_removed++;

                /* advance head, wrap if needed */
                if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
                        ppd->sdma_descq_head = 0;

                /* if now past this txp's descs, do the callback */
                if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
                        /* remove from active list */
                        list_del_init(&txp->list);
                        if (txp->callback)
                                (*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
                        /* see if there is another txp */
                        if (list_empty(&ppd->sdma_activelist))
                                txp = NULL;
                        else {
                                lp = ppd->sdma_activelist.next;
                                txp = list_entry(lp, struct qib_sdma_txreq,
                                        list);
                                idx = txp->start_idx;
                        }
                }
                progress = 1;
        }
        if (progress)
                qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
        return progress;
}

/*
 * This is called from interrupt context.
 */
void qib_sdma_intr(struct qib_pportdata *ppd)
{
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        __qib_sdma_intr(ppd);

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

void __qib_sdma_intr(struct qib_pportdata *ppd)
{
        if (__qib_sdma_running(ppd))
                qib_sdma_make_progress(ppd);
}

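/*
 * Per-port SDMA setup: allocate the descriptor ring, put the state
 * machine into a known state, initialize the hardware registers, and
 * kick everything off with e10_go_hw_start.
 */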
int qib_setup_sdma(struct qib_pportdata *ppd)
{
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;
        int ret = 0;

        ret = alloc_sdma(ppd);
        if (ret)
                goto bail;

        /* set consistent sdma state */
        ppd->dd->f_sdma_init_early(ppd);
        spin_lock_irqsave(&ppd->sdma_lock, flags);
        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        /* set up reference counting */
        kref_init(&ppd->sdma_state.kref);
        init_completion(&ppd->sdma_state.comp);

        ppd->sdma_generation = 0;
        ppd->sdma_descq_head = 0;
        ppd->sdma_descq_removed = 0;
        ppd->sdma_descq_added = 0;

        INIT_LIST_HEAD(&ppd->sdma_activelist);

        tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
                (unsigned long)ppd);

        ret = dd->f_init_sdma_regs(ppd);
        if (ret)
                goto bail_alloc;

        qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);

        return 0;

bail_alloc:
        qib_teardown_sdma(ppd);
bail:
        return ret;
}

void qib_teardown_sdma(struct qib_pportdata *ppd)
{
        qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);

        /*
         * This waits for the state machine to exit so it is not
         * necessary to kill the sdma_sw_clean_up_task to make sure
         * it is not running.
         */
        sdma_finalput(&ppd->sdma_state);

        free_sdma(ppd);
}

int qib_sdma_running(struct qib_pportdata *ppd)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        ret = __qib_sdma_running(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}

/*
 * Complete a request when sdma is not running; it is likely the only
 * request, but to simplify the code, always queue it, then process the
 * full activelist.  We process the entire list to ensure that this
 * particular request does get its callback, but in the correct order.
 * Must be called with sdma_lock held
 */
static void complete_sdma_err_req(struct qib_pportdata *ppd,
                                  struct qib_verbs_txreq *tx)
{
        atomic_inc(&tx->qp->s_dma_busy);
        /* no sdma descriptors, so no unmap_desc */
        tx->txreq.start_idx = 0;
        tx->txreq.next_descq_idx = 0;
        list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
        clear_sdma_activelist(ppd);
}

/*
 * This function queues one IB packet onto the send DMA queue per call.
 * The caller is responsible for checking:
 * 1) The number of send DMA descriptor entries is less than the size of
 *    the descriptor queue.
 * 2) The IB SGE addresses and lengths are 32-bit aligned
 *    (except possibly the last SGE's length)
 * 3) The SGE addresses are suitable for passing to dma_map_single().
 */
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
                        struct qib_sge_state *ss, u32 dwords,
                        struct qib_verbs_txreq *tx)
{
        unsigned long flags;
        struct qib_sge *sge;
        struct qib_qp *qp;
        int ret = 0;
        u16 tail;
        __le64 *descqp;
        u64 sdmadesc[2];
        u32 dwoffset;
        dma_addr_t addr;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

retry:
        if (unlikely(!__qib_sdma_running(ppd))) {
                complete_sdma_err_req(ppd, tx);
                goto unlock;
        }

        if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
                if (qib_sdma_make_progress(ppd))
                        goto retry;
                if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
                        ppd->dd->f_sdma_set_desc_cnt(ppd,
                                        ppd->sdma_descq_cnt / 2);
                goto busy;
        }

        dwoffset = tx->hdr_dwords;
        make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);

        sdmadesc[0] |= SDMA_DESC_FIRST;
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
                sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;

        /* write to the descq */
        tail = ppd->sdma_descq_tail;
        descqp = &ppd->sdma_descq[tail].qw[0];
        *descqp++ = cpu_to_le64(sdmadesc[0]);
        *descqp++ = cpu_to_le64(sdmadesc[1]);

        /* increment the tail */
        if (++tail == ppd->sdma_descq_cnt) {
                tail = 0;
                descqp = &ppd->sdma_descq[0].qw[0];
                ++ppd->sdma_generation;
        }

        tx->txreq.start_idx = tail;

        sge = &ss->sge;
        while (dwords) {
                u32 dw;
                u32 len;

                len = dwords << 2;
                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                dw = (len + 3) >> 2;
                addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
                                      dw << 2, DMA_TO_DEVICE);
                if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
                        goto unmap;
                sdmadesc[0] = 0;
                make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
                /* SDmaUseLargeBuf has to be set in every descriptor */
                if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
                        sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
                /* write to the descq */
                *descqp++ = cpu_to_le64(sdmadesc[0]);
                *descqp++ = cpu_to_le64(sdmadesc[1]);

                /* increment the tail */
                if (++tail == ppd->sdma_descq_cnt) {
                        tail = 0;
                        descqp = &ppd->sdma_descq[0].qw[0];
                        ++ppd->sdma_generation;
                }
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= QIB_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }

                dwoffset += dw;
                dwords -= dw;
        }

        if (!tail)
                descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
        descqp -= 2;
        descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
                descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
        if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
                descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);

        atomic_inc(&tx->qp->s_dma_busy);
        tx->txreq.next_descq_idx = tail;
        ppd->dd->f_sdma_update_tail(ppd, tail);
        ppd->sdma_descq_added += tx->txreq.sg_count;
        list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
        goto unlock;

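        /*
         * A dma_map_single() failed: walk the tail back toward the
         * queue's prior tail, unmapping the payload descriptors built
         * above, then report the failure to the QP.
         */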
unmap:
        for (;;) {
                if (!tail)
                        tail = ppd->sdma_descq_cnt - 1;
                else
                        tail--;
                if (tail == ppd->sdma_descq_tail)
                        break;
                unmap_desc(ppd, tail);
        }
        qp = tx->qp;
        qib_put_txreq(tx);
        spin_lock(&qp->s_lock);
        if (qp->ibqp.qp_type == IB_QPT_RC) {
                /* XXX what about error sending RDMA read responses? */
                if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
                        qib_error_qp(qp, IB_WC_GENERAL_ERR);
        } else if (qp->s_wqe)
                qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
        spin_unlock(&qp->s_lock);
        /* return zero to process the next send work request */
        goto unlock;

busy:
        qp = tx->qp;
        spin_lock(&qp->s_lock);
        if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
                struct qib_ibdev *dev;

                /*
                 * If we couldn't queue the DMA request, save the info
                 * and try again later rather than destroying the
                 * buffer and undoing the side effects of the copy.
                 */
                tx->ss = ss;
                tx->dwords = dwords;
                qp->s_tx = tx;
                dev = &ppd->dd->verbs_dev;
                spin_lock(&dev->pending_lock);
                if (list_empty(&qp->iowait)) {
                        struct qib_ibport *ibp;

                        ibp = &ppd->ibport_data;
                        ibp->n_dmawait++;
                        qp->s_flags |= QIB_S_WAIT_DMA_DESC;
                        list_add_tail(&qp->iowait, &dev->dmawait);
                }
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~QIB_S_BUSY;
                spin_unlock(&qp->s_lock);
                ret = -EBUSY;
        } else {
                spin_unlock(&qp->s_lock);
                qib_put_txreq(tx);
        }
unlock:
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
        return ret;
}

void qib_sdma_process_event(struct qib_pportdata *ppd,
        enum qib_sdma_events event)
{
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);

        __qib_sdma_process_event(ppd, event);

        if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
                qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));

        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}

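/*
 * Core of the SDMA state machine: dispatch one event against the
 * current state.  Callers must hold sdma_lock; qib_sdma_process_event()
 * above is the locked wrapper.
 */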
void __qib_sdma_process_event(struct qib_pportdata *ppd,
        enum qib_sdma_events event)
{
        struct qib_sdma_state *ss = &ppd->sdma_state;

        switch (ss->current_state) {
        case qib_sdma_state_s00_hw_down:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        break;
                case qib_sdma_event_e30_go_running:
                        /*
                         * If down, but running requested (usually the
                         * result of link up), then we need to start up.
                         * This can happen when hw down is requested
                         * while bringing the link up with traffic
                         * active on the 7220, e.g.
                         */
                        ss->go_s99_running = 1;
                        /* fall through and start dma engine */
                case qib_sdma_event_e10_go_hw_start:
                        /* This reference means the state machine is started */
                        sdma_get(&ppd->sdma_state);
                        sdma_set_state(ppd,
                                       qib_sdma_state_s10_hw_start_up_wait);
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        sdma_sw_tear_down(ppd);
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s10_hw_start_up_wait:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_sw_tear_down(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        sdma_set_state(ppd, ss->go_s99_running ?
                                       qib_sdma_state_s99_running :
                                       qib_sdma_state_s20_idle);
                        break;
                case qib_sdma_event_e30_go_running:
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s20_idle:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_sw_tear_down(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        sdma_set_state(ppd, qib_sdma_state_s99_running);
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s30_sw_clean_up_wait:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s10_hw_start_up_wait);
                        sdma_hw_start_up(ppd);
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s40_hw_clean_up_wait:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s30_sw_clean_up_wait);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e60_hw_halted:
                        break;
                case qib_sdma_event_e70_go_idle:
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s50_hw_halt_wait:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        ss->go_s99_running = 1;
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s40_hw_clean_up_wait);
                        ppd->dd->f_sdma_hw_clean_up(ppd);
                        break;
                case qib_sdma_event_e70_go_idle:
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        break;
                case qib_sdma_event_e7322_err_halted:
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;

        case qib_sdma_state_s99_running:
                switch (event) {
                case qib_sdma_event_e00_go_hw_down:
                        sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e10_go_hw_start:
                        break;
                case qib_sdma_event_e20_hw_started:
                        break;
                case qib_sdma_event_e30_go_running:
                        break;
                case qib_sdma_event_e40_sw_cleaned:
                        break;
                case qib_sdma_event_e50_hw_cleaned:
                        break;
                case qib_sdma_event_e60_hw_halted:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s30_sw_clean_up_wait);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e70_go_idle:
                        sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
                        ss->go_s99_running = 0;
                        break;
                case qib_sdma_event_e7220_err_halted:
                        sdma_set_state(ppd,
                                       qib_sdma_state_s30_sw_clean_up_wait);
                        sdma_start_sw_clean_up(ppd);
                        break;
                case qib_sdma_event_e7322_err_halted:
                        sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
                        break;
                case qib_sdma_event_e90_timer_tick:
                        break;
                }
                break;
        }

        ss->last_event = event;
}